diff -pruN 8.17.2-2/.buildkite/Dockerfile 9.2.0-1/.buildkite/Dockerfile
--- 8.17.2-2/.buildkite/Dockerfile	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.buildkite/Dockerfile	2025-10-28 16:50:53.000000000 +0000
@@ -1,4 +1,4 @@
-ARG PYTHON_VERSION=3.13
+ARG PYTHON_VERSION=3.14
 FROM python:${PYTHON_VERSION}
 
 # Default UID/GID to 1000
diff -pruN 8.17.2-2/.buildkite/functions/imports.sh 9.2.0-1/.buildkite/functions/imports.sh
--- 8.17.2-2/.buildkite/functions/imports.sh	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.buildkite/functions/imports.sh	2025-10-28 16:50:53.000000000 +0000
@@ -43,7 +43,7 @@ if [[ -z $es_node_name ]]; then
 
 fi
 
-  export script_path=$(dirname $(realpath -s $0))
+  export script_path=$(dirname $(realpath $0))
   source $script_path/functions/cleanup.sh
   source $script_path/functions/wait-for-container.sh
   trap "cleanup_trap ${network_name}" EXIT
diff -pruN 8.17.2-2/.buildkite/pipeline.yml 9.2.0-1/.buildkite/pipeline.yml
--- 8.17.2-2/.buildkite/pipeline.yml	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.buildkite/pipeline.yml	2025-10-28 16:50:53.000000000 +0000
@@ -5,18 +5,17 @@ steps:
     env:
       PYTHON_VERSION: "{{ matrix.python }}"
       TEST_SUITE: "platinum"
-      STACK_VERSION: "8.17.0-SNAPSHOT"
+      STACK_VERSION: "9.2.0-SNAPSHOT"
       PYTHON_CONNECTION_CLASS: "{{ matrix.connection }}"
       NOX_SESSION: "{{ matrix.nox_session }}"
     matrix:
       setup:
         python:
-          - "3.8"
-          - "3.9"
           - "3.10"
           - "3.11"
           - "3.12"
           - "3.13"
+          - "3.14"
         connection:
           - "urllib3"
           - "requests"
@@ -24,11 +23,11 @@ steps:
           - "test"
       adjustments:
         - with:
-            python: "3.8"
+            python: "3.10"
             connection: "urllib3"
             nox_session: "test_otel"
         - with:
-            python: "3.13"
+            python: "3.14"
             connection: "urllib3"
             nox_session: "test_otel"
     command: ./.buildkite/run-tests
diff -pruN 8.17.2-2/.buildkite/run-elasticsearch.sh 9.2.0-1/.buildkite/run-elasticsearch.sh
--- 8.17.2-2/.buildkite/run-elasticsearch.sh	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.buildkite/run-elasticsearch.sh	2025-10-28 16:50:53.000000000 +0000
@@ -21,7 +21,7 @@
 # - Moved ELASTIC_PASSWORD and xpack.security.enabled to the base arguments for "Security On by default"
 # - Use https only when TEST_SUITE is "platinum", when "free" use http
 
-script_path=$(dirname $(realpath -s $0))
+script_path=$(dirname $(realpath $0))
 source $script_path/functions/imports.sh
 set -euo pipefail
 
diff -pruN 8.17.2-2/.buildkite/run-tests 9.2.0-1/.buildkite/run-tests
--- 8.17.2-2/.buildkite/run-tests	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.buildkite/run-tests	2025-10-28 16:50:53.000000000 +0000
@@ -7,10 +7,10 @@
 # Default environment variables
 export STACK_VERSION="${STACK_VERSION:=8.0.0-SNAPSHOT}"
 export TEST_SUITE="${TEST_SUITE:=platinum}"
-export PYTHON_VERSION="${PYTHON_VERSION:=3.13}"
+export PYTHON_VERSION="${PYTHON_VERSION:=3.14}"
 export PYTHON_CONNECTION_CLASS="${PYTHON_CONNECTION_CLASS:=urllib3}"
 
-script_path=$(dirname $(realpath -s $0))
+script_path=$(dirname $(realpath $0))
 source $script_path/functions/imports.sh
 set -euo pipefail
 
diff -pruN 8.17.2-2/.github/workflows/ci.yml 9.2.0-1/.github/workflows/ci.yml
--- 8.17.2-2/.github/workflows/ci.yml	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.github/workflows/ci.yml	2025-10-28 16:50:53.000000000 +0000
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Python 3.x
         uses: actions/setup-python@v5
         with:
@@ -23,7 +23,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Python 3.x
         uses: actions/setup-python@v5
         with:
@@ -38,7 +38,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
+        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
         nox-session: [""]
         runs-on: ["ubuntu-latest"]
 
@@ -47,7 +47,7 @@ jobs:
     continue-on-error: false
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set Up Python - ${{ matrix.python-version }}
         uses: actions/setup-python@v5
         with:
diff -pruN 8.17.2-2/.github/workflows/docs-build.yml 9.2.0-1/.github/workflows/docs-build.yml
--- 8.17.2-2/.github/workflows/docs-build.yml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/.github/workflows/docs-build.yml	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,19 @@
+name: docs-build
+
+on:
+  push:
+    branches:
+      - main
+  pull_request_target: ~
+  merge_group: ~
+
+jobs:
+  docs-preview:
+    uses: elastic/docs-builder/.github/workflows/preview-build.yml@main
+    with:
+      path-pattern: docs/**
+    permissions:
+      deployments: write
+      id-token: write
+      contents: read
+      pull-requests: write
diff -pruN 8.17.2-2/.github/workflows/docs-cleanup.yml 9.2.0-1/.github/workflows/docs-cleanup.yml
--- 8.17.2-2/.github/workflows/docs-cleanup.yml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/.github/workflows/docs-cleanup.yml	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,14 @@
+name: docs-cleanup
+
+on:
+  pull_request_target:
+    types:
+      - closed
+
+jobs:
+  docs-preview:
+    uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main
+    permissions:
+      contents: none
+      id-token: write
+      deployments: write
diff -pruN 8.17.2-2/.github/workflows/docs-preview.yml 9.2.0-1/.github/workflows/docs-preview.yml
--- 8.17.2-2/.github/workflows/docs-preview.yml	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/.github/workflows/docs-preview.yml	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
----
-name: docs-preview
-
-on:
-  pull_request_target:
-    types: [opened]
-
-permissions:
-  pull-requests: write
-
-jobs:
-  doc-preview-pr:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: elastic/docs/.github/actions/docs-preview@master
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          repo: ${{ github.event.repository.name }}
-          preview-path: 'guide/en/elasticsearch/client/python-api/index.html'
-          pr: ${{ github.event.pull_request.number }}
diff -pruN 8.17.2-2/CHANGELOG.md 9.2.0-1/CHANGELOG.md
--- 8.17.2-2/CHANGELOG.md	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/CHANGELOG.md	2025-10-28 16:50:53.000000000 +0000
@@ -1 +1 @@
-See: https://www.elastic.co/guide/en/elasticsearch/client/python-api/master/release-notes.html
+See: https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html
diff -pruN 8.17.2-2/CONTRIBUTING.md 9.2.0-1/CONTRIBUTING.md
--- 8.17.2-2/CONTRIBUTING.md	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/CONTRIBUTING.md	2025-10-28 16:50:53.000000000 +0000
@@ -17,7 +17,7 @@ found at `.buildkite/run-elasticsearch.s
 
 There are several environment variables that control integration tests:
 
-- `PYTHON_VERSION`: Version of Python to use, defaults to `3.9`
+- `PYTHON_VERSION`: Version of Python to use, defaults to `3.14`
 - `PYTHON_CONNECTION_CLASS`: Connection class to use, defaults to `Urllib3HttpConnection`
 - `STACK_VERSION`: Version of Elasticsearch to use. These should be
   the same as tags of `docker.elastic.co/elasticsearch/elasticsearch`
diff -pruN 8.17.2-2/README.md 9.2.0-1/README.md
--- 8.17.2-2/README.md	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/README.md	2025-10-28 16:50:53.000000000 +0000
@@ -1,5 +1,5 @@
 <p align="center">
-    <img src="https://github.com/elastic/elasticsearch-py/raw/main/docs/logo-elastic-glyph-color.svg" width="20%" alt="Elastic logo" />
+    <img src="https://github.com/elastic/elasticsearch-py/raw/main/docs/images/logo-elastic-glyph-color.svg" width="20%" alt="Elastic logo" />
 </p>
 
 # Elasticsearch Python Client
@@ -63,24 +63,23 @@ of the getting started documentation.
 
 ## Compatibility
 
-Language clients are forward compatible; meaning that the clients support
-communicating with greater or equal minor versions of Elasticsearch without
-breaking. It does not mean that the clients automatically support new features
-of newer Elasticsearch versions; it is only possible after a release of a new
-client version. For example, a 8.12 client version won't automatically support
-the new features of the 8.13 version of Elasticsearch, the 8.13 client version
-is required for that. Elasticsearch language clients are only backwards
-compatible with default distributions and without guarantees made.
-
-| Elasticsearch Version | Elasticsearch-Python Branch | Supported |
-| --------------------- | ------------------------ | --------- |
-| main                  | main                     |           |
-| 8.x                   | 8.x                      | 8.x       |
-| 7.x                   | 7.x                      | 7.17      |
+Language clients are _forward compatible:_ each client version works with equivalent and later minor versions of Elasticsearch without breaking.
 
+Compatibility does not imply full feature parity. New Elasticsearch features are supported only in equivalent client versions. For example, an 8.12 client fully supports Elasticsearch 8.12 features and works with 8.13 without breaking; however, it does not support new Elasticsearch 8.13 features. An 8.13 client fully supports Elasticsearch 8.13 features.
 
-If you have a need to have multiple versions installed at the same time older
-versions are also released as ``elasticsearch7`` and ``elasticsearch8``.
+| Elasticsearch version | elasticsearch-py branch |
+| --- | --- |
+| main | main |
+| 9.x | 9.x |
+| 9.x | 8.x |
+| 8.x | 8.x |
+
+Elasticsearch language clients are also _backward compatible_ across minor versions &mdash; with default distributions and without guarantees.
+
+> [!TIP]
+> To upgrade to a new major version, first upgrade Elasticsearch, then upgrade the Python Elasticsearch client.
+
+If you need to work with multiple client versions, note that older versions are also released as `elasticsearch7` and `elasticsearch8`.
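+
+For example, a quick sketch of installing the previous major client alongside the current one in the same environment (both package names are published on PyPI):
+
+```bash
+python -m pip install elasticsearch elasticsearch8
+```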
 
 
 ## Documentation
@@ -91,18 +90,23 @@ Documentation for the client is [availab
 [Read the Docs]: https://elasticsearch-py.readthedocs.io
 
 
-## Feedback 🗣️
+## Try Elasticsearch and Kibana locally
+
+If you want to try Elasticsearch and Kibana locally, you can run the following command:
+
+```bash
+curl -fsSL https://elastic.co/start-local | sh
+```
+
+This will run Elasticsearch at [http://localhost:9200](http://localhost:9200) and Kibana at [http://localhost:5601](http://localhost:5601).
+
+More information is available [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html).
+
+
+## Contributing
 
-The engineering team here at Elastic is looking for developers to participate in
-research and feedback sessions to learn more about how you use our Python client and
-what improvements we can make to their design and your workflow. If you're interested in
-sharing your insights into developer experience and language client design, please fill
-out this [short form]. Depending on the number of responses we get, we may either
-contact you for a 1:1 conversation or a focus group with other developers who use the
-same client. Thank you in advance - your feedback is crucial to improving the user
-experience for all Elasticsearch developers!
+See [CONTRIBUTING.md](./CONTRIBUTING.md)
 
-[short form]: https://forms.gle/bYZwDQXijfhfwshn9
 
 ## License
 
diff -pruN 8.17.2-2/catalog-info.yaml 9.2.0-1/catalog-info.yaml
--- 8.17.2-2/catalog-info.yaml	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/catalog-info.yaml	2025-10-28 16:50:53.000000000 +0000
@@ -45,11 +45,15 @@ spec:
           branch: 'main'
           cronline: '0 10 * * *'
           message: 'Daily run for main branch'
-        Daily 8.14:
-          branch: '8.14'
+        Daily 9.0:
+          branch: '9.0'
           cronline: '0 10 * * *'
-          message: 'Daily run for 8.14 branch'
-        Daily 8.15:
-          branch: '8.15'
+          message: 'Daily run for 9.0 branch'
+        Daily 8.19:
+          branch: '8.19'
           cronline: '0 10 * * *'
-          message: 'Daily run for 8.15 branch'
+          message: 'Daily run for 8.19 branch'
+        Daily 8.18:
+          branch: '8.18'
+          cronline: '0 10 * * *'
+          message: 'Daily run for 8.18 branch'
diff -pruN 8.17.2-2/debian/changelog 9.2.0-1/debian/changelog
--- 8.17.2-2/debian/changelog	2025-03-29 10:11:31.000000000 +0000
+++ 9.2.0-1/debian/changelog	2025-10-29 11:51:11.000000000 +0000
@@ -1,3 +1,53 @@
+python-elasticsearch (9.2.0-1) unstable; urgency=medium
+
+  * New upstream version 9.2.0
+  * d/watch: Convert to version 5
+  * d/control: Update BD
+  * d/rules: Enable test_vectors,
+    since the tests no longer depend on sentence_transformers.
+
+ -- Karsten Schöke <karsten.schoeke@geobasis-bb.de>  Wed, 29 Oct 2025 12:51:11 +0100
+
+python-elasticsearch (9.1.1-2) unstable; urgency=medium
+
+  * change link to elasticsearch9
+
+ -- Karsten Schöke <karsten.schoeke@geobasis-bb.de>  Thu, 02 Oct 2025 13:42:22 +0200
+
+python-elasticsearch (9.1.1-1) unstable; urgency=medium
+
+  * New upstream version 9.1.1
+
+ -- Karsten Schöke <karsten.schoeke@geobasis-bb.de>  Wed, 01 Oct 2025 13:15:07 +0200
+
+python-elasticsearch (9.1.0-3) unstable; urgency=medium
+
+  * Team Upload.
+  * d/control: Add Dependency on python3-elastic-transport >=9.1 and
+    << 10 (Closes: #1110798).
+
+ -- Emmanuel Arias <eamanu@debian.org>  Sat, 27 Sep 2025 16:00:41 -0300
+
+python-elasticsearch (9.1.0-2) unstable; urgency=medium
+
+  * Team upload.
+  * Explicitly depends on elastic-transport 9.x
+
+ -- Alexandre Detiste <tchet@debian.org>  Sat, 30 Aug 2025 15:28:05 +0200
+
+python-elasticsearch (9.1.0-1) unstable; urgency=medium
+
+  * d/watch: change to git mode
+  * Disable CI pipeline on the upstream and pristine-tar branches.
+  * New upstream version 9.1.0 (Closes: #1110796, #1110800)
+  * Rediff patches
+  * d/control: remove obsolete Rules-Requires-Root
+  * d/control: add new BD
+  * d/control: switch to new major version 9.
+  * d/rules: disable new vector checks.
+
+ -- Karsten Schöke <karsten.schoeke@geobasis-bb.de>  Mon, 11 Aug 2025 15:41:27 +0200
+
 python-elasticsearch (8.17.2-2) unstable; urgency=medium
 
   * d/control: providing elasticsearch8 namespace (Closes: #1100099)
@@ -11,7 +61,7 @@ python-elasticsearch (8.17.2-1) unstable
   * d/control: Bump standards to 4.7.2
   * d/control: Add myself to Uploaders
   * d/control: depends on new python3-elastic-transport
-  * d/patches/001_disable_intersphinx: 
+  * d/patches/001_disable_intersphinx:
     use local inventory references for intersphinx
   * d/copyright: changes after new upstream release.
   * d/python3-elasticsearch.docs: rename README post new upsteam
diff -pruN 8.17.2-2/debian/control 9.2.0-1/debian/control
--- 8.17.2-2/debian/control	2025-03-29 10:11:31.000000000 +0000
+++ 9.2.0-1/debian/control	2025-10-29 10:14:03.000000000 +0000
@@ -13,30 +13,35 @@ Build-Depends: debhelper-compat (= 13),
                python-elastic-transport-doc <!nodoc>,
                python3-aiohttp <!nocheck>,
                python3-all,
+               python3-anyio,
                python3-dateutil <!nocheck>,
                python3-doc <!nodoc>,
                python3-elastic-transport,
                python3-hatchling,
                python3-orjson <!nocheck>,
+               python3-pydantic <!nocheck>,
                python3-pytest <!nocheck>,
                python3-pytest-asyncio <!nocheck>,
                python3-setuptools,
+               python3-sniffio,
                python3-sphinx <!nodoc>,
                python3-sphinx-rtd-theme <!nodoc>,
+               python3-trio <!nodoc>,
                python3-typing-extensions,
+               python3-pytz <!nocheck>,
                python3-yaml <!nocheck>,
 Standards-Version: 4.7.2
 Vcs-Browser: https://salsa.debian.org/python-team/packages/python-elasticsearch
 Vcs-Git: https://salsa.debian.org/python-team/packages/python-elasticsearch.git
 Homepage: https://github.com/elasticsearch/elasticsearch-py
-Rules-Requires-Root: no
 
 Package: python3-elasticsearch
 Architecture: all
 Depends: ${misc:Depends},
          ${python3:Depends},
+         python3-elastic-transport (>= 9.1), python3-elastic-transport (<< 10)
 Suggests: python-elasticsearch-doc,
-Provides: python3-elasticsearch8
+Provides: python3-elasticsearch9
 Description: Python client for Elasticsearch (Python3 version)
  Official low-level client for Elasticsearch. Its goal is to provide common
  ground for all Elasticsearch-related code in Python; because of this it tries
diff -pruN 8.17.2-2/debian/patches/001_disable_intersphinx 9.2.0-1/debian/patches/001_disable_intersphinx
--- 8.17.2-2/debian/patches/001_disable_intersphinx	2025-03-19 19:58:06.000000000 +0000
+++ 9.2.0-1/debian/patches/001_disable_intersphinx	2025-10-29 09:55:26.000000000 +0000
@@ -1,16 +1,23 @@
-Description: sphinx: use local inventory references for intersphinx
- Note that we will not be making this conditional: we want the build to
- fail if intersphinx was to fetch inventory data from the internet.
- .
-Author: Karsten Schöke <karsten.schoeke@geobasis-bb.de>
+From: =?utf-8?q?Karsten_Sch=C3=B6ke?= <karsten.schoeke@geobasis-bb.de>
+Date: Mon, 11 Aug 2025 12:43:47 +0200
+Subject: sphinx: use local inventory references for intersphinx
+
 Forwarded: not-needed
 Last-Update: 2025-03-19
 
---- python-elasticsearch-8.17.2.orig/docs/sphinx/conf.py
-+++ python-elasticsearch-8.17.2/docs/sphinx/conf.py
+Note that we will not be making this conditional: we want the build to
+fail if intersphinx was to fetch inventory data from the internet.
+---
+ docs/sphinx/conf.py | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py
+index d153793..e3cb15e 100644
+--- a/docs/sphinx/conf.py
++++ b/docs/sphinx/conf.py
 @@ -80,9 +80,10 @@ html_static_path = ["_static"]
  html_css_files = ["css/custom.css"]
-
+ 
  intersphinx_mapping = {
 -    "python": ("https://docs.python.org/3", None),
 +    "python": ("https://docs.python.org/3", 
diff -pruN 8.17.2-2/debian/python3-elasticsearch.links 9.2.0-1/debian/python3-elasticsearch.links
--- 8.17.2-2/debian/python3-elasticsearch.links	2025-03-29 10:11:31.000000000 +0000
+++ 9.2.0-1/debian/python3-elasticsearch.links	2025-10-29 09:54:49.000000000 +0000
@@ -1 +1 @@
-/usr/lib/python3/dist-packages/elasticsearch /usr/lib/python3/dist-packages/elasticsearch8
+/usr/lib/python3/dist-packages/elasticsearch /usr/lib/python3/dist-packages/elasticsearch9
diff -pruN 8.17.2-2/debian/rules 9.2.0-1/debian/rules
--- 8.17.2-2/debian/rules	2025-03-19 19:58:06.000000000 +0000
+++ 9.2.0-1/debian/rules	2025-10-29 11:44:06.000000000 +0000
@@ -4,8 +4,8 @@
 #export DH_VERBOSE := 1
 
 export PYBUILD_NAME := elasticsearch
-export PYBUILD_BEFORE_TEST=cp -a {dir}/test_elasticsearch {build_dir}
-export PYBUILD_AFTER_TEST=rm -rf {build_dir}/test_elasticsearch
+
+export PYBUILD_TEST_ARGS = {dir}/test_elasticsearch
 
 %:
 	dh $@ --with sphinxdoc --buildsystem=pybuild
diff -pruN 8.17.2-2/debian/salsa-ci.yml 9.2.0-1/debian/salsa-ci.yml
--- 8.17.2-2/debian/salsa-ci.yml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/debian/salsa-ci.yml	2025-10-29 09:54:49.000000000 +0000
@@ -0,0 +1,7 @@
+---
+include:
+  - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/recipes/debian.yml
+
+variables:
+  SALSA_CI_IGNORED_BRANCHES: 'upstream|pristine-tar'
+
diff -pruN 8.17.2-2/debian/watch 9.2.0-1/debian/watch
--- 8.17.2-2/debian/watch	2024-06-02 10:28:05.000000000 +0000
+++ 9.2.0-1/debian/watch	2025-10-29 11:43:10.000000000 +0000
@@ -1,3 +1,8 @@
-version=4
-opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)\.tar\.gz%@PACKAGE@-$1.tar.gz%" \
-  https://github.com/elastic/elasticsearch-py/tags .*/v?(\d[.\d]*)\.tar\.gz
+Version: 5
+
+Source: https://github.com/elastic/elasticsearch-py.git
+Matching-Pattern: refs/tags/@ANY_VERSION@
+Compression: gz
+Dversionmangle: s/\+ds(\.?\d+)?$//
+Mode: git
+Uversionmangle: s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha)\.?\d*)$/$1~$2/
diff -pruN 8.17.2-2/docs/docset.yml 9.2.0-1/docs/docset.yml
--- 8.17.2-2/docs/docset.yml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/docset.yml	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,12 @@
+project: 'Python client'
+products:
+  - id: elasticsearch-client
+cross_links:
+  - apm-agent-python
+  - docs-content
+  - elasticsearch
+toc:
+  - toc: reference
+  - toc: release-notes
+subs:
+  es:   "Elasticsearch"
diff -pruN 8.17.2-2/docs/guide/configuration.asciidoc 9.2.0-1/docs/guide/configuration.asciidoc
--- 8.17.2-2/docs/guide/configuration.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/configuration.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,451 +0,0 @@
-[[config]]
-== Configuration
-
-This page contains information about the most important configuration options of 
-the Python {es} client.
-
-
-[discrete]
-[[tls-and-ssl]]
-=== TLS/SSL
-
-The options in this section can only be used when the node is configured for HTTPS. An error will be raised if using these options with an HTTP node.
-
-[discrete]
-==== Verifying server certificates
-
-The typical route to verify a cluster certificate is via a "CA bundle" which can be specified via the `ca_certs` parameter. If no options are given and the https://github.com/certifi/python-certifi[certifi package] is installed then certifi's CA bundle is used by default.
-
-If you have your own CA bundle to use you can configure via the `ca_certs` parameter:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    "https://...",
-    ca_certs="/path/to/certs.pem"
-)
-------------------------------------
-
-If using a generated certificate or certificate with a known fingerprint you can use the `ssl_assert_fingerprint` to specify the fingerprint which tries to match the server's leaf certificate during the TLS handshake. If there is any matching certificate the connection is verified, otherwise a `TlsError` is raised.
-
-In Python 3.9 and earlier only the leaf certificate will be verified but in Python 3.10+ private APIs are used to verify any certificate in the certificate chain. This helps when using certificates that are generated on a multi-node cluster.
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    "https://...",
-    ssl_assert_fingerprint=(
-        "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3"
-    )
-)
-------------------------------------
-
-To disable certificate verification use the `verify_certs=False` parameter. This option should be avoided in production, instead use the other options to verify the clusters' certificate.
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    "https://...",
-    verify_certs=False
-)
-------------------------------------
-
-[discrete]
-==== TLS versions
-
-Configuring the minimum TLS version to connect to is done via the `ssl_version` parameter. By default this is set to a minimum value of TLSv1.2. Use the `ssl.TLSVersion` enumeration to specify versions.
-
-[source,python]
-------------------------------------
-import ssl
-
-client = Elasticsearch(
-    ...,
-    ssl_version=ssl.TLSVersion.TLSv1_2
-)
-------------------------------------
-
-[discrete]
-==== Client TLS certificate authentication
-
-Elasticsearch can be configured to authenticate clients via TLS client certificates. Client certificate and keys can be configured via the `client_cert` and `client_key` parameters:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    client_cert="/path/to/cert.pem",
-    client_key="/path/to/key.pem",
-)
-------------------------------------
-
-
-[discrete]
-==== Using an SSLContext
-
-For advanced users an `ssl.SSLContext` object can be used for configuring TLS via the `ssl_context` parameter. The `ssl_context` parameter can't be combined with any other TLS options except for the `ssl_assert_fingerprint` parameter.
-
-[source,python]
-------------------------------------
-import ssl
-
-# Create and configure an SSLContext
-ctx = ssl.create_default_context()
-ctx.load_verify_locations(...)
-
-client = Elasticsearch(
-    ...,
-    ssl_context=ctx
-)
-------------------------------------
-
-
-[discrete]
-[[compression]]
-=== HTTP compression
-
-Compression of HTTP request and response bodies can be enabled with the `http_compress` parameter.
-If enabled then HTTP request bodies will be compressed with `gzip` and HTTP responses will include
-the `Accept-Encoding: gzip` HTTP header. By default compression is disabled.
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    http_compress=True  # Enable compression!
-)
-------------------------------------
-
-HTTP compression is recommended to be enabled when requests are traversing the network.
-Compression is automatically enabled when connecting to Elastic Cloud.
-
-
-[discrete]
-[[timeouts]]
-=== Request timeouts
-
-Requests can be configured to timeout if taking too long to be serviced. The `request_timeout` parameter can be passed via the client constructor or the client `.options()` method. When the request times out the node will raise a `ConnectionTimeout` exception which can trigger retries.
-
-Setting `request_timeout` to `None` will disable timeouts.
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    request_timeout=10  # 10 second timeout
-)
-
-# Search request will timeout in 5 seconds
-client.options(request_timeout=5).search(...)
-------------------------------------
-
-[discrete]
-==== API and server timeouts
-
-There are API-level timeouts to take into consideration when making requests which can cause the request to timeout on server-side rather than client-side. You may need to configure both a transport and API level timeout for long running operations.
-
-In the example below there are three different configurable timeouts for the `cluster.health` API all with different meanings for the request:
-
-[source,python]
-------------------------------------
-client.options(
-    # Amount of time to wait for an HTTP response to start.
-    request_timeout=30
-).cluster.health(
-    # Amount of time to wait to collect info on all nodes.
-    timeout=30,
-    # Amount of time to wait for info from the master node.
-    master_timeout=10,
-)
-------------------------------------
-
-
-[discrete]
-[[retries]]
-=== Retries
-
-Requests can be retried if they don't return with a successful response. This provides a way for requests to be resilient against transient failures or overloaded nodes.
-
-The maximum number of retries per request can be configured via the `max_retries` parameter. Setting this parameter to 0 disables retries. This parameter can be set in the client constructor or per-request via the client `.options()` method:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    max_retries=5
-)
-
-# For this API request we disable retries with 'max_retries=0'
-client.options(max_retries=0).index(
-    index="blogs",
-    document={
-        "title": "..."
-    }
-)
-------------------------------------
-
-[discrete]
-==== Retrying on connection errors and timeouts
-
-Connection errors are automatically retried if retries are enabled. Retrying requests on connection timeouts can be enabled or disabled via the `retry_on_timeout` parameter. This parameter can be set on the client constructor or via the client `.options()` method:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    retry_on_timeout=True
-)
-client.options(retry_on_timeout=False).info()
-------------------------------------
-
-[discrete]
-==== Retrying status codes
-
-By default if retries are enabled `retry_on_status` is set to `(429, 502, 503, 504)`. This parameter can be set on the client constructor or via the client `.options()` method. Setting this value to `()` will disable the default behavior.
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    retry_on_status=()
-)
-
-# Retry this API on '500 Internal Error' statuses
-client.options(retry_on_status=[500]).index(
-    index="blogs",
-    document={
-        "title": "..."
-    }
-)
-------------------------------------
-
-[discrete]
-==== Ignoring status codes
-
-By default an `ApiError` exception will be raised for any non-2XX HTTP requests that exhaust retries, if any. If you're expecting an HTTP error from the API but aren't interested in raising an exception you can use the `ignore_status` parameter via the client `.options()` method.
-
-A good example where this is useful is setting up or cleaning up resources in a cluster in a robust way:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(...)
-
-# API request is robust against the index not existing:
-resp = client.options(ignore_status=404).indices.delete(index="delete-this")
-resp.meta.status  # Can be either '2XX' or '404'
-
-# API request is robust against the index already existing:
-resp = client.options(ignore_status=[400]).indices.create(
-    index="create-this",
-    mapping={
-        "properties": {"field": {"type": "integer"}}
-    }
-)
-resp.meta.status  # Can be either '2XX' or '400'
-------------------------------------
-
-When using the `ignore_status` parameter the error response will be returned serialized just like a non-error response. In these cases it can be useful to inspect the HTTP status of the response. To do this you can inspect the `resp.meta.status`.
-
-[discrete]
-[[sniffing]]
-=== Sniffing for new nodes
-
-Additional nodes can be discovered by a process called "sniffing" where the client will query the cluster for more nodes that can handle requests.
-
-Sniffing can happen at three different times: on client instantiation, before requests, and on a node failure. These three behaviors can be enabled and disabled with the `sniff_on_start`, `sniff_before_requests`, and `sniff_on_node_failure` parameters.
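-
-As a rough sketch, turning on all three behaviors from the client constructor might look like this:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    sniff_on_start=True,
-    sniff_before_requests=True,
-    sniff_on_node_failure=True
-)
-------------------------------------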
-
-IMPORTANT: When using an HTTP load balancer or proxy you cannot use sniffing functionality as the cluster would supply the client with IP addresses to directly connect to the cluster, circumventing the load balancer. Depending on your configuration this might be something you don't want or break completely.
-
-[discrete]
-==== Waiting between sniffing attempts
-
-To avoid needlessly sniffing too often there is a delay between attempts to discover new nodes. This value can be controlled via the `min_delay_between_sniffing` parameter.
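-
-For instance, a small sketch, assuming the delay is expressed in seconds:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    sniff_on_node_failure=True,
-    min_delay_between_sniffing=60  # sniff at most once per minute
-)
-------------------------------------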
-
-[discrete]
-==== Filtering nodes which are sniffed
-
-By default nodes which are marked with only a `master` role will not be used. To change the behavior the parameter `sniffed_node_callback` can be used. To mark a sniffed node not to be added to the node pool
-return `None` from the `sniffed_node_callback`, otherwise return a `NodeConfig` instance.
-
-[source,python]
-------------------------------------
-from typing import Optional, Dict, Any
-from elastic_transport import NodeConfig
-from elasticsearch import Elasticsearch
-
-def filter_master_eligible_nodes(
-    node_info: Dict[str, Any],
-    node_config: NodeConfig
-) -> Optional[NodeConfig]:
-    # This callback ignores all nodes that are master eligible
-    # instead of master-only nodes (default behavior)
-    if "master" in node_info.get("roles", ()):
-        return None
-    return node_config
-
-client = Elasticsearch(
-    "https://localhost:9200",
-    sniffed_node_callback=filter_master_eligible_nodes
-)
-------------------------------------
-
-The `node_info` parameter is part of the response from the `nodes.info()` API, below is an example
-of what that object looks like:
-
-[source,json]
-------------------------------------
-{
-  "name": "SRZpKFZ",
-  "transport_address": "127.0.0.1:9300",
-  "host": "127.0.0.1",
-  "ip": "127.0.0.1",
-  "version": "5.0.0",
-  "build_hash": "253032b",
-  "roles": ["master", "data", "ingest"],
-  "http": {
-    "bound_address": ["[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200"],
-    "publish_address": "1.1.1.1:123",
-    "max_content_length_in_bytes": 104857600
-  }
-}
-------------------------------------
-
-
-[discrete]
-[[node-pool]]
-=== Node Pool
-
-[discrete]
-==== Selecting a node from the pool
-
-You can specify a node selector pattern via the `node_selector_class` parameter. The supported values are `round_robin` and `random`. Default is `round_robin`.
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    node_selector_class="round_robin"
-)
-------------------------------------
-
-Custom selectors are also supported:
-
-[source,python]
-------------------------------------
-from elastic_transport import NodeSelector
-
-class CustomSelector(NodeSelector):
-    def select(nodes): ...
-
-client = Elasticsearch(
-    ...,
-    node_selector_class=CustomSelector
-)
-------------------------------------
-
-[discrete]
-==== Marking nodes dead and alive
-
-Individual nodes of Elasticsearch may have transient connectivity or load issues which may make them unable to service requests. To combat this the pool of nodes will detect when a node isn't able to service requests due to transport or API errors.
-
-After a node has been timed out it will be moved back to the set of "alive" nodes but only after the node returns a successful response will the node be marked as "alive" in terms of consecutive errors.
-
-The `dead_node_backoff_factor` and `max_dead_node_backoff` parameters can be used to configure how long the node pool will put the node into timeout with each consecutive failure. Both parameters use a unit of seconds.
-
-The calculation is equal to `min(dead_node_backoff_factor * (2 ** (consecutive_failures - 1)), max_dead_node_backoff)`.
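-
-A short sketch with illustrative (non-default) values for both parameters:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    dead_node_backoff_factor=1.0,  # first consecutive failure: 1 second timeout
-    max_dead_node_backoff=30.0     # never time a node out for more than 30 seconds
-)
-------------------------------------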
-
-
-[discrete]
-[[serializer]]
-=== Serializers
-
-Serializers transform bytes on the wire into native Python objects and vice-versa. By default the client ships with serializers for `application/json`, `application/x-ndjson`, `text/*`, `application/vnd.apache.arrow.stream` and `application/mapbox-vector-tile`.
-
-You can define custom serializers via the `serializers` parameter:
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch, JsonSerializer
-
-class JsonSetSerializer(JsonSerializer):
-    """Custom JSON serializer that handles Python sets"""
-    def default(self, data: Any) -> Any:
-        if isinstance(data, set):
-            return list(data)
-        return super().default(data)
-
-client = Elasticsearch(
-    ...,
-    # Serializers are a mapping of 'mimetype' to Serializer class.
-    serializers={"application/json": JsonSetSerializer()}
-)
-------------------------------------
-
-If the `orjson` package is installed, you can use the faster ``OrjsonSerializer`` for the default mimetype (``application/json``):
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch, OrjsonSerializer
-
-es = Elasticsearch(
-    ...,
-    serializer=OrjsonSerializer()
-)
-------------------------------------
-
-orjson is particularly fast when serializing vectors as it has native numpy support. This will be the default in a future release. Note that you can install orjson with the `orjson` extra:
-
-[source,sh]
---------------------------------------------
-$ python -m pip install elasticsearch[orjson]
---------------------------------------------
-
-[discrete]
-[[nodes]]
-=== Nodes
-
-[discrete]
-==== Node implementations
-
-The default node class for synchronous I/O is `urllib3` and the default node class for asynchronous I/O is `aiohttp`.
-
-For all of the built-in HTTP node implementations like `urllib3`, `requests`, and `aiohttp` you can specify with a simple string to the `node_class` parameter:
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-
-client = Elasticsearch(
-    ...,
-    node_class="requests"
-)
-------------------------------------
-
-You can also specify a custom node implementation via the `node_class` parameter:
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-from elastic_transport import Urllib3HttpNode
-
-class CustomHttpNode(Urllib3HttpNode):
-    ...
-
-client = Elasticsearch(
-    ...
-    node_class=CustomHttpNode
-)
-------------------------------------
-
-[discrete]
-==== HTTP connections per node
-
-Each node contains its own pool of HTTP connections to allow for concurrent requests. This value is configurable via the `connections_per_node` parameter:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(
-    ...,
-    connections_per_node=5
-)
-------------------------------------
diff -pruN 8.17.2-2/docs/guide/connecting.asciidoc 9.2.0-1/docs/guide/connecting.asciidoc
--- 8.17.2-2/docs/guide/connecting.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/connecting.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,438 +0,0 @@
-[[connecting]]
-== Connecting
-
-This page contains the information you need to connect the Client with {es}.
-
-[discrete]
-[[connect-ec]]
-=== Connecting to Elastic Cloud
-
-https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html[Elastic Cloud] 
-is the easiest way to get started with {es}. When connecting to Elastic Cloud 
-with the Python {es} client you should always use the `cloud_id` 
-parameter to connect. You can find this value within the "Manage Deployment" 
-page after you've created a cluster (look in the top-left if you're in Kibana).
-
-We recommend using a Cloud ID whenever possible because your client will be 
-automatically configured for optimal use with Elastic Cloud including HTTPS and 
-HTTP compression.
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Password for the 'elastic' user generated by Elasticsearch
-ELASTIC_PASSWORD = "<password>"
-
-# Found in the 'Manage Deployment' page
-CLOUD_ID = "deployment-name:dXMtZWFzdDQuZ2Nw..."
-
-# Create the client instance
-client = Elasticsearch(
-    cloud_id=CLOUD_ID,
-    basic_auth=("elastic", ELASTIC_PASSWORD)
-)
-
-# Successful response!
-client.info()
-# {'name': 'instance-0000000000', 'cluster_name': ...}
-----
-
-[discrete]
-[[connect-self-managed-new]]
-=== Connecting to a self-managed cluster
-
-By default {es} will start with security features like authentication and TLS 
-enabled. To connect to the {es} cluster you'll need to configure the Python {es} 
-client to use HTTPS with the generated CA certificate in order to make requests 
-successfully.
-
-If you're just getting started with {es} we recommend reading the documentation 
-on https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html[configuring] 
-and 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/starting-elasticsearch.html[starting {es}] 
-to ensure your cluster is running as expected.
-
-When you start {es} for the first time you'll see a distinct block like the one 
-below in the output from {es} (you may have to scroll up if it's been a while):
-
-```sh
-----------------------------------------------------------------
--> Elasticsearch security features have been automatically configured!
--> Authentication is enabled and cluster connections are encrypted.
-
-->  Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`):
-  lhQpLELkjkrawaBoaz0Q
-
-->  HTTP CA certificate SHA-256 fingerprint:
-  a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228
-...
-----------------------------------------------------------------
-```
-
-Note down the `elastic` user password and HTTP CA fingerprint for the next 
-sections. In the examples below they will be stored in the variables 
-`ELASTIC_PASSWORD` and `CERT_FINGERPRINT` respectively.
-
-Depending on the circumstances there are two options for verifying the HTTPS 
-connection, either verifying with the CA certificate itself or via the HTTP CA 
-certificate fingerprint.
-
-[discrete]
-==== Verifying HTTPS with CA certificates
-
-Using the `ca_certs` option is the default way the Python {es} client verifies 
-an HTTPS connection.
-
-The generated root CA certificate can be found in the `certs` directory in your 
-{es} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you're running {es} 
-in Docker there is 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html[additional documentation for retrieving the CA certificate].
-
-Once you have the `http_ca.crt` file somewhere accessible pass the path to the 
-client via `ca_certs`:
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Password for the 'elastic' user generated by Elasticsearch
-ELASTIC_PASSWORD = "<password>"
-
-# Create the client instance
-client = Elasticsearch(
-    "https://localhost:9200",
-    ca_certs="/path/to/http_ca.crt",
-    basic_auth=("elastic", ELASTIC_PASSWORD)
-)
-
-# Successful response!
-client.info()
-# {'name': 'instance-0000000000', 'cluster_name': ...}
-----
-
-NOTE: If you don't specify `ca_certs` or `ssl_assert_fingerprint` then the 
-https://certifiio.readthedocs.io[certifi package] will be used for `ca_certs` by 
-default if available.
-
-[discrete]
-==== Verifying HTTPS with certificate fingerprints (Python 3.10 or later)
-
-NOTE: Using this method **requires using Python 3.10 or later** and isn't 
-available when using the `aiohttp` HTTP client library so can't be used with 
-`AsyncElasticsearch`.
-
-This method of verifying the HTTPS connection takes advantage of the certificate 
-fingerprint value noted down earlier. Take this SHA256 fingerprint value and 
-pass it to the Python {es} client via `ssl_assert_fingerprint`:
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Fingerprint either from Elasticsearch startup or above script.
-# Colons and uppercase/lowercase don't matter when using
-# the 'ssl_assert_fingerprint' parameter
-CERT_FINGERPRINT = "A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28"
-
-# Password for the 'elastic' user generated by Elasticsearch
-ELASTIC_PASSWORD = "<password>"
-
-client = Elasticsearch(
-    "https://localhost:9200",
-    ssl_assert_fingerprint=CERT_FINGERPRINT,
-    basic_auth=("elastic", ELASTIC_PASSWORD)
-)
-
-# Successful response!
-client.info()
-# {'name': 'instance-0000000000', 'cluster_name': ...}
-----
-
-The certificate fingerprint can be calculated using `openssl x509` with the 
-certificate file:
-
-[source,sh]
-----
-openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt
-----
-
-If you don't have access to the generated CA file from {es} you can use the 
-following script to output the root CA fingerprint of the {es} instance with 
-`openssl s_client`:
-
-[source,sh]
-----
-# Replace the values of 'localhost' and '9200' to the
-# corresponding host and port values for the cluster.
-openssl s_client -connect localhost:9200 -servername localhost -showcerts </dev/null 2>/dev/null \
-  | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin
-----
-
-The output of `openssl x509` will look something like this:
-
-[source,sh]
-----
-SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28
-----
-
-
-[discrete]
-[[connect-no-security]]
-=== Connecting without security enabled
-
-WARNING: Running {es} without security enabled is not recommended.
-
-If your cluster is configured with 
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html[security explicitly disabled] 
-then you can connect via HTTP:
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Create the client instance
-client = Elasticsearch("http://localhost:9200")
-
-# Successful response!
-client.info()
-# {'name': 'instance-0000000000', 'cluster_name': ...}
-----
-
-[discrete]
-[[connect-url]]
-=== Connecting to multiple nodes
-
-The Python {es} client supports sending API requests to multiple nodes in the 
-cluster. This means that work will be more evenly spread across the cluster 
-instead of hammering the same node over and over with requests. To configure the 
-client with multiple nodes you can pass a list of URLs, each URL will be used as 
-a separate node in the pool.
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# List of nodes to connect use with different hosts and ports.
-NODES = [
-    "https://localhost:9200",
-    "https://localhost:9201",
-    "https://localhost:9202",
-]
-
-# Password for the 'elastic' user generated by Elasticsearch
-ELASTIC_PASSWORD = "<password>"
-
-client = Elasticsearch(
-    NODES,
-    ca_certs="/path/to/http_ca.crt",
-    basic_auth=("elastic", ELASTIC_PASSWORD)
-)
-----
-
-By default nodes are selected using round-robin, but alternate node selection 
-strategies can be configured with `node_selector_class` parameter.
-
-NOTE: If your {es} cluster is behind a load balancer like when using Elastic 
-Cloud you won't need to configure multiple nodes. Instead use the load balancer 
-host and port.
-
-
-[discrete]
-[[authentication]]
-=== Authentication
-
-This section contains code snippets to show you how to connect to various {es} 
-providers. All authentication methods are supported on the client constructor
-or via the per-request `.options()` method:
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Authenticate from the constructor
-client = Elasticsearch(
-    "https://localhost:9200",
-    ca_certs="/path/to/http_ca.crt",
-    basic_auth=("username", "password")
-)
-
-# Authenticate via the .options() method:
-client.options(
-    basic_auth=("username", "password")
-).indices.get(index="*")
-
-# You can persist the authenticated client to use
-# later or use for multiple API calls:
-auth_client = client.options(api_key="api_key")
-for i in range(10):
-    auth_client.index(
-        index="example-index",
-        document={"field": i}
-    )
-----
-
-
-[discrete]
-[[auth-basic]]
-==== HTTP Basic authentication (Username and Password)
-
-HTTP Basic authentication uses the `basic_auth` parameter by passing in a 
-username and password within a tuple:
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Adds the HTTP header 'Authorization: Basic <base64 username:password>'
-client = Elasticsearch(
-    "https://localhost:9200",
-    ca_certs="/path/to/http_ca.crt",
-    basic_auth=("username", "password")
-)
-----
-
-
-[discrete]
-[[auth-bearer]]
-==== HTTP Bearer authentication
-
-HTTP Bearer authentication uses the `bearer_auth` parameter by passing the token
-as a string. This authentication method is used by 
-https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-api-create-service-token.html[Service Account Tokens]
-and https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-api-get-token.html[Bearer Tokens].
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Adds the HTTP header 'Authorization: Bearer token-value'
-client = Elasticsearch(
-    "https://localhost:9200",
-    bearer_auth="token-value"
-)
-----
-
-
-[discrete]
-[[auth-apikey]]
-==== API Key authentication
-
-You can configure the client to use {es}'s API Key for connecting to your
-cluster. These can be generated through the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[Elasticsearch Create API key API]
-or https://www.elastic.co/guide/en/kibana/current/api-keys.html#create-api-key[Kibana Stack Management].
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Adds the HTTP header 'Authorization: ApiKey <base64 api_key.id:api_key.api_key>'
-client = Elasticsearch(
-    "https://localhost:9200",
-    ca_certs="/path/to/http_ca.crt",
-    api_key="api_key",
-)
-----
-
-[discrete]
-[[compatibility-mode]]
-=== Enabling the Compatibility Mode
-
-The {es} server version 8.0 is introducing a new compatibility mode that allows 
-you a smoother upgrade experience from 7 to 8. In a nutshell, you can use the 
-latest 7.x Python {es} client with an 8.x {es} server, giving more room to
-coordinate the upgrade of your codebase to the next major version. 
-
-If you want to leverage this functionality, please make sure that you are using 
-the latest 7.x Python {es} client and set the environment variable 
-`ELASTIC_CLIENT_APIVERSIONING` to `true`. The client is handling the rest
-internally. For every 8.0 and beyond Python {es} client, you're all set! The 
-compatibility mode is enabled by default.
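-
-For example, with a 7.x client the environment variable can be set before starting the application:
-
-[source,sh]
-----
-export ELASTIC_CLIENT_APIVERSIONING=true
-----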
-
-[discrete]
-[[connecting-faas]]
-=== Using the Client in a Function-as-a-Service Environment
-
-This section illustrates the best practices for leveraging the {es} client in a 
-Function-as-a-Service (FaaS) environment.
-
-The most influential optimization is to initialize the client outside of the 
-function, the global scope.
-
-This practice does not only improve performance but also enables background 
-functionality as – for example –
-https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how[sniffing].
-The following examples provide a skeleton for the best practices.
-
-IMPORTANT: The async client shouldn't be used within Function-as-a-Service as a new event
-           loop must be started for each invocation. Instead the synchronous `Elasticsearch`
-           client is recommended.
-
-[discrete]
-[[connecting-faas-gcp]]
-==== GCP Cloud Functions
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Client initialization
-client = Elasticsearch(
-    cloud_id="deployment-name:ABCD...",
-    api_key=...
-)
-
-def main(request):
-    # Use the client
-    client.search(index=..., query={"match_all": {}})
-
-----
-
-[discrete]
-[[connecting-faas-aws]]
-==== AWS Lambda
-
-[source,python]
-----
-from elasticsearch import Elasticsearch
-
-# Client initialization
-client = Elasticsearch(
-    cloud_id="deployment-name:ABCD...",
-    api_key=...
-)
-
-def main(event, context):
-    # Use the client
-    client.search(index=..., query={"match_all": {}})
-
-----
-
-[discrete]
-[[connecting-faas-azure]]
-==== Azure Functions
-
-[source,python]
-----
-import azure.functions as func
-from elasticsearch import Elasticsearch
-
-# Client initialization
-client = Elasticsearch(
-    cloud_id="deployment-name:ABCD...",
-    api_key=...
-)
-
-def main(request: func.HttpRequest) -> func.HttpResponse:
-    # Use the client
-    client.search(index=..., query={"match_all": {}})
-
-----
-
-Resources used to assess these recommendations:
-
-* https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations[GCP Cloud Functions: Tips & Tricks]
-* https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html[Best practices for working with AWS Lambda functions]
-* https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables[Azure Functions Python developer guide]
-* https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html[AWS Lambda: Comparing the effect of global scope]
diff -pruN 8.17.2-2/docs/guide/esql-pandas.asciidoc 9.2.0-1/docs/guide/esql-pandas.asciidoc
--- 8.17.2-2/docs/guide/esql-pandas.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/esql-pandas.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,453 +0,0 @@
-[[esql-pandas]]
-=== ES|QL and Pandas
-
-The {ref}/esql.html[Elasticsearch Query Language (ES|QL)] provides a powerful
-way to filter, transform, and analyze data stored in {es}. Designed to be easy
-to learn and use, it is a perfect fit for data scientists familiar with Pandas
-and other dataframe-based libraries. ES|QL queries produce tables with named
-columns, which is the definition of dataframes.
-
-This page shows you an example of using ES|QL and Pandas together to work with
-dataframes.
-
-[discrete]
-[[import-data]]
-==== Import data
-
-Use the 
-https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv[`employees` sample data] and 
-https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-default.json[mapping].
-The easiest way to load this dataset is to run https://gist.github.com/pquentin/7cf29a5932cf52b293699dd994b1a276[two Elasticsearch API requests] in the Kibana Console.
-
-.Index mapping request
-[%collapsible]
-====
-[source,console]
---------------------------------------------------
-PUT employees
-{
-  "mappings": {
-    "properties": {
-      "avg_worked_seconds": {
-        "type": "long"
-      },
-      "birth_date": {
-        "type": "date"
-      },
-      "emp_no": {
-        "type": "integer"
-      },
-      "first_name": {
-        "type": "keyword"
-      },
-      "gender": {
-        "type": "keyword"
-      },
-      "height": {
-        "type": "double",
-        "fields": {
-          "float": {
-            "type": "float"
-          },
-          "half_float": {
-            "type": "half_float"
-          },
-          "scaled_float": {
-            "type": "scaled_float",
-            "scaling_factor": 100
-          }
-        }
-      },
-      "hire_date": {
-        "type": "date"
-      },
-      "is_rehired": {
-        "type": "boolean"
-      },
-      "job_positions": {
-        "type": "keyword"
-      },
-      "languages": {
-        "type": "integer",
-        "fields": {
-          "byte": {
-            "type": "byte"
-          },
-          "long": {
-            "type": "long"
-          },
-          "short": {
-            "type": "short"
-          }
-        }
-      },
-      "last_name": {
-        "type": "keyword"
-      },
-      "salary": {
-        "type": "integer"
-      },
-      "salary_change": {
-        "type": "double",
-        "fields": {
-          "int": {
-            "type": "integer"
-          },
-          "keyword": {
-            "type": "keyword"
-          },
-          "long": {
-            "type": "long"
-          }
-        }
-      },
-      "still_hired": {
-        "type": "boolean"
-      }
-    }
-  }
-}
---------------------------------------------------
-// TEST[skip:TBD]
-====
-
-.Bulk request to ingest data
-[%collapsible]
-====
-[source,console]
---------------------------------------------------
-PUT employees/_bulk
-{ "index": {}}
-{"birth_date":"1953-09-02T00:00:00Z","emp_no":"10001","first_name":"Georgi","gender":"M","hire_date":"1986-06-26T00:00:00Z","languages":"2","last_name":"Facello","salary":"57305","height":"2.03","still_hired":"true","avg_worked_seconds":"268728049","job_positions":["Senior Python Developer","Accountant"],"is_rehired":["false","true"],"salary_change":"1.19"}
-{ "index": {}}
-{"birth_date":"1964-06-02T00:00:00Z","emp_no":"10002","first_name":"Bezalel","gender":"F","hire_date":"1985-11-21T00:00:00Z","languages":"5","last_name":"Simmel","salary":"56371","height":"2.08","still_hired":"true","avg_worked_seconds":"328922887","job_positions":"Senior Team Lead","is_rehired":["false","false"],"salary_change":["-7.23","11.17"]}
-{ "index": {}}
-{"birth_date":"1959-12-03T00:00:00Z","emp_no":"10003","first_name":"Parto","gender":"M","hire_date":"1986-08-28T00:00:00Z","languages":"4","last_name":"Bamford","salary":"61805","height":"1.83","still_hired":"false","avg_worked_seconds":"200296405","salary_change":["14.68","12.82"]}
-{ "index": {}}
-{"birth_date":"1954-05-01T00:00:00Z","emp_no":"10004","first_name":"Chirstian","gender":"M","hire_date":"1986-12-01T00:00:00Z","languages":"5","last_name":"Koblick","salary":"36174","height":"1.78","still_hired":"true","avg_worked_seconds":"311267831","job_positions":["Reporting Analyst","Tech Lead","Head Human Resources","Support Engineer"],"is_rehired":"true","salary_change":["3.65","-0.35","1.13","13.48"]}
-{ "index": {}}
-{"birth_date":"1955-01-21T00:00:00Z","emp_no":"10005","first_name":"Kyoichi","gender":"M","hire_date":"1989-09-12T00:00:00Z","languages":"1","last_name":"Maliniak","salary":"63528","height":"2.05","still_hired":"true","avg_worked_seconds":"244294991","is_rehired":["false","false","false","true"],"salary_change":["-2.14","13.07"]}
-{ "index": {}}
-{"birth_date":"1953-04-20T00:00:00Z","emp_no":"10006","first_name":"Anneke","gender":"F","hire_date":"1989-06-02T00:00:00Z","languages":"3","last_name":"Preusig","salary":"60335","height":"1.56","still_hired":"false","avg_worked_seconds":"372957040","job_positions":["Tech Lead","Principal Support Engineer","Senior Team Lead"],"salary_change":"-3.90"}
-{ "index": {}}
-{"birth_date":"1957-05-23T00:00:00Z","emp_no":"10007","first_name":"Tzvetan","gender":"F","hire_date":"1989-02-10T00:00:00Z","languages":"4","last_name":"Zielinski","salary":"74572","height":"1.70","still_hired":"true","avg_worked_seconds":"393084805","is_rehired":["true","false","true","false"],"salary_change":["-7.06","1.99","0.57"]}
-{ "index": {}}
-{"birth_date":"1958-02-19T00:00:00Z","emp_no":"10008","first_name":"Saniya","gender":"M","hire_date":"1994-09-15T00:00:00Z","languages":"2","last_name":"Kalloufi","salary":"43906","height":"2.10","still_hired":"true","avg_worked_seconds":"283074758","job_positions":["Senior Python Developer","Junior Developer","Purchase Manager","Internship"],"is_rehired":["true","false"],"salary_change":["12.68","3.54","0.75","-2.92"]}
-{ "index": {}}
-{"birth_date":"1952-04-19T00:00:00Z","emp_no":"10009","first_name":"Sumant","gender":"F","hire_date":"1985-02-18T00:00:00Z","languages":"1","last_name":"Peac","salary":"66174","height":"1.85","still_hired":"false","avg_worked_seconds":"236805489","job_positions":["Senior Python Developer","Internship"]}
-{ "index": {}}
-{"birth_date":"1963-06-01T00:00:00Z","emp_no":"10010","first_name":"Duangkaew","hire_date":"1989-08-24T00:00:00Z","languages":"4","last_name":"Piveteau","salary":"45797","height":"1.70","still_hired":"false","avg_worked_seconds":"315236372","job_positions":["Architect","Reporting Analyst","Tech Lead","Purchase Manager"],"is_rehired":["true","true","false","false"],"salary_change":["5.05","-6.77","4.69","12.15"]}
-{ "index": {}}
-{"birth_date":"1953-11-07T00:00:00Z","emp_no":"10011","first_name":"Mary","hire_date":"1990-01-22T00:00:00Z","languages":"5","last_name":"Sluis","salary":"31120","height":"1.50","still_hired":"true","avg_worked_seconds":"239615525","job_positions":["Architect","Reporting Analyst","Tech Lead","Senior Team Lead"],"is_rehired":["true","true"],"salary_change":["10.35","-7.82","8.73","3.48"]}
-{ "index": {}}
-{"birth_date":"1960-10-04T00:00:00Z","emp_no":"10012","first_name":"Patricio","hire_date":"1992-12-18T00:00:00Z","languages":"5","last_name":"Bridgland","salary":"48942","height":"1.97","still_hired":"false","avg_worked_seconds":"365510850","job_positions":["Head Human Resources","Accountant"],"is_rehired":["false","true","true","false"],"salary_change":"0.04"}
-{ "index": {}}
-{"birth_date":"1963-06-07T00:00:00Z","emp_no":"10013","first_name":"Eberhardt","hire_date":"1985-10-20T00:00:00Z","languages":"1","last_name":"Terkki","salary":"48735","height":"1.94","still_hired":"true","avg_worked_seconds":"253864340","job_positions":"Reporting Analyst","is_rehired":["true","true"]}
-{ "index": {}}
-{"birth_date":"1956-02-12T00:00:00Z","emp_no":"10014","first_name":"Berni","hire_date":"1987-03-11T00:00:00Z","languages":"5","last_name":"Genin","salary":"37137","height":"1.99","still_hired":"false","avg_worked_seconds":"225049139","job_positions":["Reporting Analyst","Data Scientist","Head Human Resources"],"salary_change":["-1.89","9.07"]}
-{ "index": {}}
-{"birth_date":"1959-08-19T00:00:00Z","emp_no":"10015","first_name":"Guoxiang","hire_date":"1987-07-02T00:00:00Z","languages":"5","last_name":"Nooteboom","salary":"25324","height":"1.66","still_hired":"true","avg_worked_seconds":"390266432","job_positions":["Principal Support Engineer","Junior Developer","Head Human Resources","Support Engineer"],"is_rehired":["true","false","false","false"],"salary_change":["14.25","12.40"]}
-{ "index": {}}
-{"birth_date":"1961-05-02T00:00:00Z","emp_no":"10016","first_name":"Kazuhito","hire_date":"1995-01-27T00:00:00Z","languages":"2","last_name":"Cappelletti","salary":"61358","height":"1.54","still_hired":"false","avg_worked_seconds":"253029411","job_positions":["Reporting Analyst","Python Developer","Accountant","Purchase Manager"],"is_rehired":["false","false"],"salary_change":["-5.18","7.69"]}
-{ "index": {}}
-{"birth_date":"1958-07-06T00:00:00Z","emp_no":"10017","first_name":"Cristinel","hire_date":"1993-08-03T00:00:00Z","languages":"2","last_name":"Bouloucos","salary":"58715","height":"1.74","still_hired":"false","avg_worked_seconds":"236703986","job_positions":["Data Scientist","Head Human Resources","Purchase Manager"],"is_rehired":["true","false","true","true"],"salary_change":"-6.33"}
-{ "index": {}}
-{"birth_date":"1954-06-19T00:00:00Z","emp_no":"10018","first_name":"Kazuhide","hire_date":"1987-04-03T00:00:00Z","languages":"2","last_name":"Peha","salary":"56760","height":"1.97","still_hired":"false","avg_worked_seconds":"309604079","job_positions":"Junior Developer","is_rehired":["false","false","true","true"],"salary_change":["-1.64","11.51","-5.32"]}
-{ "index": {}}
-{"birth_date":"1953-01-23T00:00:00Z","emp_no":"10019","first_name":"Lillian","hire_date":"1999-04-30T00:00:00Z","languages":"1","last_name":"Haddadi","salary":"73717","height":"2.06","still_hired":"false","avg_worked_seconds":"342855721","job_positions":"Purchase Manager","is_rehired":["false","false"],"salary_change":["-6.84","8.42","-7.26"]}
-{ "index": {}}
-{"birth_date":"1952-12-24T00:00:00Z","emp_no":"10020","first_name":"Mayuko","gender":"M","hire_date":"1991-01-26T00:00:00Z","last_name":"Warwick","salary":"40031","height":"1.41","still_hired":"false","avg_worked_seconds":"373309605","job_positions":"Tech Lead","is_rehired":["true","true","false"],"salary_change":"-5.81"}
-{ "index": {}}
-{"birth_date":"1960-02-20T00:00:00Z","emp_no":"10021","first_name":"Ramzi","gender":"M","hire_date":"1988-02-10T00:00:00Z","last_name":"Erde","salary":"60408","height":"1.47","still_hired":"false","avg_worked_seconds":"287654610","job_positions":"Support Engineer","is_rehired":"true"}
-{ "index": {}}
-{"birth_date":"1952-07-08T00:00:00Z","emp_no":"10022","first_name":"Shahaf","gender":"M","hire_date":"1995-08-22T00:00:00Z","last_name":"Famili","salary":"48233","height":"1.82","still_hired":"false","avg_worked_seconds":"233521306","job_positions":["Reporting Analyst","Data Scientist","Python Developer","Internship"],"is_rehired":["true","false"],"salary_change":["12.09","2.85"]}
-{ "index": {}}
-{"birth_date":"1953-09-29T00:00:00Z","emp_no":"10023","first_name":"Bojan","gender":"F","hire_date":"1989-12-17T00:00:00Z","last_name":"Montemayor","salary":"47896","height":"1.75","still_hired":"true","avg_worked_seconds":"330870342","job_positions":["Accountant","Support Engineer","Purchase Manager"],"is_rehired":["true","true","false"],"salary_change":["14.63","0.80"]}
-{ "index": {}}
-{"birth_date":"1958-09-05T00:00:00Z","emp_no":"10024","first_name":"Suzette","gender":"F","hire_date":"1997-05-19T00:00:00Z","last_name":"Pettey","salary":"64675","height":"2.08","still_hired":"true","avg_worked_seconds":"367717671","job_positions":"Junior Developer","is_rehired":["true","true","true","true"]}
-{ "index": {}}
-{"birth_date":"1958-10-31T00:00:00Z","emp_no":"10025","first_name":"Prasadram","gender":"M","hire_date":"1987-08-17T00:00:00Z","last_name":"Heyers","salary":"47411","height":"1.87","still_hired":"false","avg_worked_seconds":"371270797","job_positions":"Accountant","is_rehired":["true","false"],"salary_change":["-4.33","-2.90","12.06","-3.46"]}
-{ "index": {}}
-{"birth_date":"1953-04-03T00:00:00Z","emp_no":"10026","first_name":"Yongqiao","gender":"M","hire_date":"1995-03-20T00:00:00Z","last_name":"Berztiss","salary":"28336","height":"2.10","still_hired":"true","avg_worked_seconds":"359208133","job_positions":"Reporting Analyst","is_rehired":["false","true"],"salary_change":["-7.37","10.62","11.20"]}
-{ "index": {}}
-{"birth_date":"1962-07-10T00:00:00Z","emp_no":"10027","first_name":"Divier","gender":"F","hire_date":"1989-07-07T00:00:00Z","last_name":"Reistad","salary":"73851","height":"1.53","still_hired":"false","avg_worked_seconds":"374037782","job_positions":"Senior Python Developer","is_rehired":"false"}
-{ "index": {}}
-{"birth_date":"1963-11-26T00:00:00Z","emp_no":"10028","first_name":"Domenick","gender":"M","hire_date":"1991-10-22T00:00:00Z","last_name":"Tempesti","salary":"39356","height":"2.07","still_hired":"true","avg_worked_seconds":"226435054","job_positions":["Tech Lead","Python Developer","Accountant","Internship"],"is_rehired":["true","false","false","true"]}
-{ "index": {}}
-{"birth_date":"1956-12-13T00:00:00Z","emp_no":"10029","first_name":"Otmar","gender":"M","hire_date":"1985-11-20T00:00:00Z","last_name":"Herbst","salary":"74999","height":"1.99","still_hired":"false","avg_worked_seconds":"257694181","job_positions":["Senior Python Developer","Data Scientist","Principal Support Engineer"],"is_rehired":"true","salary_change":["-0.32","-1.90","-8.19"]}
-{ "index": {}}
-{"birth_date":"1958-07-14T00:00:00Z","emp_no":"10030","gender":"M","hire_date":"1994-02-17T00:00:00Z","languages":"3","last_name":"Demeyer","salary":"67492","height":"1.92","still_hired":"false","avg_worked_seconds":"394597613","job_positions":["Tech Lead","Data Scientist","Senior Team Lead"],"is_rehired":["true","false","false"],"salary_change":"-0.40"}
-{ "index": {}}
-{"birth_date":"1959-01-27T00:00:00Z","emp_no":"10031","gender":"M","hire_date":"1991-09-01T00:00:00Z","languages":"4","last_name":"Joslin","salary":"37716","height":"1.68","still_hired":"false","avg_worked_seconds":"348545109","job_positions":["Architect","Senior Python Developer","Purchase Manager","Senior Team Lead"],"is_rehired":"false"}
-{ "index": {}}
-{"birth_date":"1960-08-09T00:00:00Z","emp_no":"10032","gender":"F","hire_date":"1990-06-20T00:00:00Z","languages":"3","last_name":"Reistad","salary":"62233","height":"2.10","still_hired":"false","avg_worked_seconds":"277622619","job_positions":["Architect","Senior Python Developer","Junior Developer","Purchase Manager"],"is_rehired":["false","false"],"salary_change":["9.32","-4.92"]}
-{ "index": {}}
-{"birth_date":"1956-11-14T00:00:00Z","emp_no":"10033","gender":"M","hire_date":"1987-03-18T00:00:00Z","languages":"1","last_name":"Merlo","salary":"70011","height":"1.63","still_hired":"false","avg_worked_seconds":"208374744","is_rehired":"true"}
-{ "index": {}}
-{"birth_date":"1962-12-29T00:00:00Z","emp_no":"10034","gender":"M","hire_date":"1988-09-21T00:00:00Z","languages":"1","last_name":"Swan","salary":"39878","height":"1.46","still_hired":"false","avg_worked_seconds":"214393176","job_positions":["Business Analyst","Data Scientist","Python Developer","Accountant"],"is_rehired":"false","salary_change":"-8.46"}
-{ "index": {}}
-{"birth_date":"1953-02-08T00:00:00Z","emp_no":"10035","gender":"M","hire_date":"1988-09-05T00:00:00Z","languages":"5","last_name":"Chappelet","salary":"25945","height":"1.81","still_hired":"false","avg_worked_seconds":"203838153","job_positions":["Senior Python Developer","Data Scientist"],"is_rehired":"false","salary_change":["-2.54","-6.58"]}
-{ "index": {}}
-{"birth_date":"1959-08-10T00:00:00Z","emp_no":"10036","gender":"M","hire_date":"1992-01-03T00:00:00Z","languages":"4","last_name":"Portugali","salary":"60781","height":"1.61","still_hired":"false","avg_worked_seconds":"305493131","job_positions":"Senior Python Developer","is_rehired":["true","false","false"]}
-{ "index": {}}
-{"birth_date":"1963-07-22T00:00:00Z","emp_no":"10037","gender":"M","hire_date":"1990-12-05T00:00:00Z","languages":"2","last_name":"Makrucki","salary":"37691","height":"2.00","still_hired":"true","avg_worked_seconds":"359217000","job_positions":["Senior Python Developer","Tech Lead","Accountant"],"is_rehired":"false","salary_change":"-7.08"}
-{ "index": {}}
-{"birth_date":"1960-07-20T00:00:00Z","emp_no":"10038","gender":"M","hire_date":"1989-09-20T00:00:00Z","languages":"4","last_name":"Lortz","salary":"35222","height":"1.53","still_hired":"true","avg_worked_seconds":"314036411","job_positions":["Senior Python Developer","Python Developer","Support Engineer"]}
-{ "index": {}}
-{"birth_date":"1959-10-01T00:00:00Z","emp_no":"10039","gender":"M","hire_date":"1988-01-19T00:00:00Z","languages":"2","last_name":"Brender","salary":"36051","height":"1.55","still_hired":"false","avg_worked_seconds":"243221262","job_positions":["Business Analyst","Python Developer","Principal Support Engineer"],"is_rehired":["true","true"],"salary_change":"-6.90"}
-{ "index": {}}
-{"emp_no":"10040","first_name":"Weiyi","gender":"F","hire_date":"1993-02-14T00:00:00Z","languages":"4","last_name":"Meriste","salary":"37112","height":"1.90","still_hired":"false","avg_worked_seconds":"244478622","job_positions":"Principal Support Engineer","is_rehired":["true","false","true","true"],"salary_change":["6.97","14.74","-8.94","1.92"]}
-{ "index": {}}
-{"emp_no":"10041","first_name":"Uri","gender":"F","hire_date":"1989-11-12T00:00:00Z","languages":"1","last_name":"Lenart","salary":"56415","height":"1.75","still_hired":"false","avg_worked_seconds":"287789442","job_positions":["Data Scientist","Head Human Resources","Internship","Senior Team Lead"],"salary_change":["9.21","0.05","7.29","-2.94"]}
-{ "index": {}}
-{"emp_no":"10042","first_name":"Magy","gender":"F","hire_date":"1993-03-21T00:00:00Z","languages":"3","last_name":"Stamatiou","salary":"30404","height":"1.44","still_hired":"true","avg_worked_seconds":"246355863","job_positions":["Architect","Business Analyst","Junior Developer","Internship"],"salary_change":["-9.28","9.42"]}
-{ "index": {}}
-{"emp_no":"10043","first_name":"Yishay","gender":"M","hire_date":"1990-10-20T00:00:00Z","languages":"1","last_name":"Tzvieli","salary":"34341","height":"1.52","still_hired":"true","avg_worked_seconds":"287222180","job_positions":["Data Scientist","Python Developer","Support Engineer"],"is_rehired":["false","true","true"],"salary_change":["-5.17","4.62","7.42"]}
-{ "index": {}}
-{"emp_no":"10044","first_name":"Mingsen","gender":"F","hire_date":"1994-05-21T00:00:00Z","languages":"1","last_name":"Casley","salary":"39728","height":"2.06","still_hired":"false","avg_worked_seconds":"387408356","job_positions":["Tech Lead","Principal Support Engineer","Accountant","Support Engineer"],"is_rehired":["true","true"],"salary_change":"8.09"}
-{ "index": {}}
-{"emp_no":"10045","first_name":"Moss","gender":"M","hire_date":"1989-09-02T00:00:00Z","languages":"3","last_name":"Shanbhogue","salary":"74970","height":"1.70","still_hired":"false","avg_worked_seconds":"371418933","job_positions":["Principal Support Engineer","Junior Developer","Accountant","Purchase Manager"],"is_rehired":["true","false"]}
-{ "index": {}}
-{"emp_no":"10046","first_name":"Lucien","gender":"M","hire_date":"1992-06-20T00:00:00Z","languages":"4","last_name":"Rosenbaum","salary":"50064","height":"1.52","still_hired":"true","avg_worked_seconds":"302353405","job_positions":["Principal Support Engineer","Junior Developer","Head Human Resources","Internship"],"is_rehired":["true","true","false","true"],"salary_change":"2.39"}
-{ "index": {}}
-{"emp_no":"10047","first_name":"Zvonko","gender":"M","hire_date":"1989-03-31T00:00:00Z","languages":"4","last_name":"Nyanchama","salary":"42716","height":"1.52","still_hired":"true","avg_worked_seconds":"306369346","job_positions":["Architect","Data Scientist","Principal Support Engineer","Senior Team Lead"],"is_rehired":"true","salary_change":["-6.36","12.12"]}
-{ "index": {}}
-{"emp_no":"10048","first_name":"Florian","gender":"M","hire_date":"1985-02-24T00:00:00Z","languages":"3","last_name":"Syrotiuk","salary":"26436","height":"2.00","still_hired":"false","avg_worked_seconds":"248451647","job_positions":"Internship","is_rehired":["true","true"]}
-{ "index": {}}
-{"emp_no":"10049","first_name":"Basil","gender":"F","hire_date":"1992-05-04T00:00:00Z","languages":"5","last_name":"Tramer","salary":"37853","height":"1.52","still_hired":"true","avg_worked_seconds":"320725709","job_positions":["Senior Python Developer","Business Analyst"],"salary_change":"-1.05"}
-{ "index": {}}
-{"birth_date":"1958-05-21T00:00:00Z","emp_no":"10050","first_name":"Yinghua","gender":"M","hire_date":"1990-12-25T00:00:00Z","languages":"2","last_name":"Dredge","salary":"43026","height":"1.96","still_hired":"true","avg_worked_seconds":"242731798","job_positions":["Reporting Analyst","Junior Developer","Accountant","Support Engineer"],"is_rehired":"true","salary_change":["8.70","10.94"]}
-{ "index": {}}
-{"birth_date":"1953-07-28T00:00:00Z","emp_no":"10051","first_name":"Hidefumi","gender":"M","hire_date":"1992-10-15T00:00:00Z","languages":"3","last_name":"Caine","salary":"58121","height":"1.89","still_hired":"true","avg_worked_seconds":"374753122","job_positions":["Business Analyst","Accountant","Purchase Manager"]}
-{ "index": {}}
-{"birth_date":"1961-02-26T00:00:00Z","emp_no":"10052","first_name":"Heping","gender":"M","hire_date":"1988-05-21T00:00:00Z","languages":"1","last_name":"Nitsch","salary":"55360","height":"1.79","still_hired":"true","avg_worked_seconds":"299654717","is_rehired":["true","true","false"],"salary_change":["-0.55","-1.89","-4.22","-6.03"]}
-{ "index": {}}
-{"birth_date":"1954-09-13T00:00:00Z","emp_no":"10053","first_name":"Sanjiv","gender":"F","hire_date":"1986-02-04T00:00:00Z","languages":"3","last_name":"Zschoche","salary":"54462","height":"1.58","still_hired":"false","avg_worked_seconds":"368103911","job_positions":"Support Engineer","is_rehired":["true","false","true","false"],"salary_change":["-7.67","-3.25"]}
-{ "index": {}}
-{"birth_date":"1957-04-04T00:00:00Z","emp_no":"10054","first_name":"Mayumi","gender":"M","hire_date":"1995-03-13T00:00:00Z","languages":"4","last_name":"Schueller","salary":"65367","height":"1.82","still_hired":"false","avg_worked_seconds":"297441693","job_positions":"Principal Support Engineer","is_rehired":["false","false"]}
-{ "index": {}}
-{"birth_date":"1956-06-06T00:00:00Z","emp_no":"10055","first_name":"Georgy","gender":"M","hire_date":"1992-04-27T00:00:00Z","languages":"5","last_name":"Dredge","salary":"49281","height":"2.04","still_hired":"false","avg_worked_seconds":"283157844","job_positions":["Senior Python Developer","Head Human Resources","Internship","Support Engineer"],"is_rehired":["false","false","true"],"salary_change":["7.34","12.99","3.17"]}
-{ "index": {}}
-{"birth_date":"1961-09-01T00:00:00Z","emp_no":"10056","first_name":"Brendon","gender":"F","hire_date":"1990-02-01T00:00:00Z","languages":"2","last_name":"Bernini","salary":"33370","height":"1.57","still_hired":"true","avg_worked_seconds":"349086555","job_positions":"Senior Team Lead","is_rehired":["true","false","false"],"salary_change":["10.99","-5.17"]}
-{ "index": {}}
-{"birth_date":"1954-05-30T00:00:00Z","emp_no":"10057","first_name":"Ebbe","gender":"F","hire_date":"1992-01-15T00:00:00Z","languages":"4","last_name":"Callaway","salary":"27215","height":"1.59","still_hired":"true","avg_worked_seconds":"324356269","job_positions":["Python Developer","Head Human Resources"],"salary_change":["-6.73","-2.43","-5.27","1.03"]}
-{ "index": {}}
-{"birth_date":"1954-10-01T00:00:00Z","emp_no":"10058","first_name":"Berhard","gender":"M","hire_date":"1987-04-13T00:00:00Z","languages":"3","last_name":"McFarlin","salary":"38376","height":"1.83","still_hired":"false","avg_worked_seconds":"268378108","job_positions":"Principal Support Engineer","salary_change":"-4.89"}
-{ "index": {}}
-{"birth_date":"1953-09-19T00:00:00Z","emp_no":"10059","first_name":"Alejandro","gender":"F","hire_date":"1991-06-26T00:00:00Z","languages":"2","last_name":"McAlpine","salary":"44307","height":"1.48","still_hired":"false","avg_worked_seconds":"237368465","job_positions":["Architect","Principal Support Engineer","Purchase Manager","Senior Team Lead"],"is_rehired":"false","salary_change":["5.53","13.38","-4.69","6.27"]}
-{ "index": {}}
-{"birth_date":"1961-10-15T00:00:00Z","emp_no":"10060","first_name":"Breannda","gender":"M","hire_date":"1987-11-02T00:00:00Z","languages":"2","last_name":"Billingsley","salary":"29175","height":"1.42","still_hired":"true","avg_worked_seconds":"341158890","job_positions":["Business Analyst","Data Scientist","Senior Team Lead"],"is_rehired":["false","false","true","false"],"salary_change":["-1.76","-0.85"]}
-{ "index": {}}
-{"birth_date":"1962-10-19T00:00:00Z","emp_no":"10061","first_name":"Tse","gender":"M","hire_date":"1985-09-17T00:00:00Z","languages":"1","last_name":"Herber","salary":"49095","height":"1.45","still_hired":"false","avg_worked_seconds":"327550310","job_positions":["Purchase Manager","Senior Team Lead"],"is_rehired":["false","true"],"salary_change":["14.39","-2.58","-0.95"]}
-{ "index": {}}
-{"birth_date":"1961-11-02T00:00:00Z","emp_no":"10062","first_name":"Anoosh","gender":"M","hire_date":"1991-08-30T00:00:00Z","languages":"3","last_name":"Peyn","salary":"65030","height":"1.70","still_hired":"false","avg_worked_seconds":"203989706","job_positions":["Python Developer","Senior Team Lead"],"is_rehired":["false","true","true"],"salary_change":"-1.17"}
-{ "index": {}}
-{"birth_date":"1952-08-06T00:00:00Z","emp_no":"10063","first_name":"Gino","gender":"F","hire_date":"1989-04-08T00:00:00Z","languages":"3","last_name":"Leonhardt","salary":"52121","height":"1.78","still_hired":"true","avg_worked_seconds":"214068302","is_rehired":"true"}
-{ "index": {}}
-{"birth_date":"1959-04-07T00:00:00Z","emp_no":"10064","first_name":"Udi","gender":"M","hire_date":"1985-11-20T00:00:00Z","languages":"5","last_name":"Jansch","salary":"33956","height":"1.93","still_hired":"false","avg_worked_seconds":"307364077","job_positions":"Purchase Manager","is_rehired":["false","false","true","false"],"salary_change":["-8.66","-2.52"]}
-{ "index": {}}
-{"birth_date":"1963-04-14T00:00:00Z","emp_no":"10065","first_name":"Satosi","gender":"M","hire_date":"1988-05-18T00:00:00Z","languages":"2","last_name":"Awdeh","salary":"50249","height":"1.59","still_hired":"false","avg_worked_seconds":"372660279","job_positions":["Business Analyst","Data Scientist","Principal Support Engineer"],"is_rehired":["false","true"],"salary_change":["-1.47","14.44","-9.81"]}
-{ "index": {}}
-{"birth_date":"1952-11-13T00:00:00Z","emp_no":"10066","first_name":"Kwee","gender":"M","hire_date":"1986-02-26T00:00:00Z","languages":"5","last_name":"Schusler","salary":"31897","height":"2.10","still_hired":"true","avg_worked_seconds":"360906451","job_positions":["Senior Python Developer","Data Scientist","Accountant","Internship"],"is_rehired":["true","true","true"],"salary_change":"5.94"}
-{ "index": {}}
-{"birth_date":"1953-01-07T00:00:00Z","emp_no":"10067","first_name":"Claudi","gender":"M","hire_date":"1987-03-04T00:00:00Z","languages":"2","last_name":"Stavenow","salary":"52044","height":"1.77","still_hired":"true","avg_worked_seconds":"347664141","job_positions":["Tech Lead","Principal Support Engineer"],"is_rehired":["false","false"],"salary_change":["8.72","4.44"]}
-{ "index": {}}
-{"birth_date":"1962-11-26T00:00:00Z","emp_no":"10068","first_name":"Charlene","gender":"M","hire_date":"1987-08-07T00:00:00Z","languages":"3","last_name":"Brattka","salary":"28941","height":"1.58","still_hired":"true","avg_worked_seconds":"233999584","job_positions":"Architect","is_rehired":"true","salary_change":["3.43","-5.61","-5.29"]}
-{ "index": {}}
-{"birth_date":"1960-09-06T00:00:00Z","emp_no":"10069","first_name":"Margareta","gender":"F","hire_date":"1989-11-05T00:00:00Z","languages":"5","last_name":"Bierman","salary":"41933","height":"1.77","still_hired":"true","avg_worked_seconds":"366512352","job_positions":["Business Analyst","Junior Developer","Purchase Manager","Support Engineer"],"is_rehired":"false","salary_change":["-3.34","-6.33","6.23","-0.31"]}
-{ "index": {}}
-{"birth_date":"1955-08-20T00:00:00Z","emp_no":"10070","first_name":"Reuven","gender":"M","hire_date":"1985-10-14T00:00:00Z","languages":"3","last_name":"Garigliano","salary":"54329","height":"1.77","still_hired":"true","avg_worked_seconds":"347188604","is_rehired":["true","true","true"],"salary_change":"-5.90"}
-{ "index": {}}
-{"birth_date":"1958-01-21T00:00:00Z","emp_no":"10071","first_name":"Hisao","gender":"M","hire_date":"1987-10-01T00:00:00Z","languages":"2","last_name":"Lipner","salary":"40612","height":"2.07","still_hired":"false","avg_worked_seconds":"306671693","job_positions":["Business Analyst","Reporting Analyst","Senior Team Lead"],"is_rehired":["false","false","false"],"salary_change":"-2.69"}
-{ "index": {}}
-{"birth_date":"1952-05-15T00:00:00Z","emp_no":"10072","first_name":"Hironoby","gender":"F","hire_date":"1988-07-21T00:00:00Z","languages":"5","last_name":"Sidou","salary":"54518","height":"1.82","still_hired":"true","avg_worked_seconds":"209506065","job_positions":["Architect","Tech Lead","Python Developer","Senior Team Lead"],"is_rehired":["false","false","true","false"],"salary_change":["11.21","-2.30","2.22","-5.44"]}
-{ "index": {}}
-{"birth_date":"1954-02-23T00:00:00Z","emp_no":"10073","first_name":"Shir","gender":"M","hire_date":"1991-12-01T00:00:00Z","languages":"4","last_name":"McClurg","salary":"32568","height":"1.66","still_hired":"false","avg_worked_seconds":"314930367","job_positions":["Principal Support Engineer","Python Developer","Junior Developer","Purchase Manager"],"is_rehired":["true","false"],"salary_change":"-5.67"}
-{ "index": {}}
-{"birth_date":"1955-08-28T00:00:00Z","emp_no":"10074","first_name":"Mokhtar","gender":"F","hire_date":"1990-08-13T00:00:00Z","languages":"5","last_name":"Bernatsky","salary":"38992","height":"1.64","still_hired":"true","avg_worked_seconds":"382397583","job_positions":["Senior Python Developer","Python Developer"],"is_rehired":["true","false","false","true"],"salary_change":["6.70","1.98","-5.64","2.96"]}
-{ "index": {}}
-{"birth_date":"1960-03-09T00:00:00Z","emp_no":"10075","first_name":"Gao","gender":"F","hire_date":"1987-03-19T00:00:00Z","languages":"5","last_name":"Dolinsky","salary":"51956","height":"1.94","still_hired":"false","avg_worked_seconds":"370238919","job_positions":"Purchase Manager","is_rehired":"true","salary_change":["9.63","-3.29","8.42"]}
-{ "index": {}}
-{"birth_date":"1952-06-13T00:00:00Z","emp_no":"10076","first_name":"Erez","gender":"F","hire_date":"1985-07-09T00:00:00Z","languages":"3","last_name":"Ritzmann","salary":"62405","height":"1.83","still_hired":"false","avg_worked_seconds":"376240317","job_positions":["Architect","Senior Python Developer"],"is_rehired":"false","salary_change":["-6.90","-1.30","8.75"]}
-{ "index": {}}
-{"birth_date":"1964-04-18T00:00:00Z","emp_no":"10077","first_name":"Mona","gender":"M","hire_date":"1990-03-02T00:00:00Z","languages":"5","last_name":"Azuma","salary":"46595","height":"1.68","still_hired":"false","avg_worked_seconds":"351960222","job_positions":"Internship","salary_change":"-0.01"}
-{ "index": {}}
-{"birth_date":"1959-12-25T00:00:00Z","emp_no":"10078","first_name":"Danel","gender":"F","hire_date":"1987-05-26T00:00:00Z","languages":"2","last_name":"Mondadori","salary":"69904","height":"1.81","still_hired":"true","avg_worked_seconds":"377116038","job_positions":["Architect","Principal Support Engineer","Internship"],"is_rehired":"true","salary_change":["-7.88","9.98","12.52"]}
-{ "index": {}}
-{"birth_date":"1961-10-05T00:00:00Z","emp_no":"10079","first_name":"Kshitij","gender":"F","hire_date":"1986-03-27T00:00:00Z","languages":"2","last_name":"Gils","salary":"32263","height":"1.59","still_hired":"false","avg_worked_seconds":"320953330","is_rehired":"false","salary_change":"7.58"}
-{ "index": {}}
-{"birth_date":"1957-12-03T00:00:00Z","emp_no":"10080","first_name":"Premal","gender":"M","hire_date":"1985-11-19T00:00:00Z","languages":"5","last_name":"Baek","salary":"52833","height":"1.80","still_hired":"false","avg_worked_seconds":"239266137","job_positions":"Senior Python Developer","salary_change":["-4.35","7.36","5.56"]}
-{ "index": {}}
-{"birth_date":"1960-12-17T00:00:00Z","emp_no":"10081","first_name":"Zhongwei","gender":"M","hire_date":"1986-10-30T00:00:00Z","languages":"2","last_name":"Rosen","salary":"50128","height":"1.44","still_hired":"true","avg_worked_seconds":"321375511","job_positions":["Accountant","Internship"],"is_rehired":["false","false","false"]}
-{ "index": {}}
-{"birth_date":"1963-09-09T00:00:00Z","emp_no":"10082","first_name":"Parviz","gender":"M","hire_date":"1990-01-03T00:00:00Z","languages":"4","last_name":"Lortz","salary":"49818","height":"1.61","still_hired":"false","avg_worked_seconds":"232522994","job_positions":"Principal Support Engineer","is_rehired":"false","salary_change":["1.19","-3.39"]}
-{ "index": {}}
-{"birth_date":"1959-07-23T00:00:00Z","emp_no":"10083","first_name":"Vishv","gender":"M","hire_date":"1987-03-31T00:00:00Z","languages":"1","last_name":"Zockler","salary":"39110","height":"1.42","still_hired":"false","avg_worked_seconds":"331236443","job_positions":"Head Human Resources"}
-{ "index": {}}
-{"birth_date":"1960-05-25T00:00:00Z","emp_no":"10084","first_name":"Tuval","gender":"M","hire_date":"1995-12-15T00:00:00Z","languages":"1","last_name":"Kalloufi","salary":"28035","height":"1.51","still_hired":"true","avg_worked_seconds":"359067056","job_positions":"Principal Support Engineer","is_rehired":"false"}
-{ "index": {}}
-{"birth_date":"1962-11-07T00:00:00Z","emp_no":"10085","first_name":"Kenroku","gender":"M","hire_date":"1994-04-09T00:00:00Z","languages":"5","last_name":"Malabarba","salary":"35742","height":"2.01","still_hired":"true","avg_worked_seconds":"353404008","job_positions":["Senior Python Developer","Business Analyst","Tech Lead","Accountant"],"salary_change":["11.67","6.75","8.40"]}
-{ "index": {}}
-{"birth_date":"1962-11-19T00:00:00Z","emp_no":"10086","first_name":"Somnath","gender":"M","hire_date":"1990-02-16T00:00:00Z","languages":"1","last_name":"Foote","salary":"68547","height":"1.74","still_hired":"true","avg_worked_seconds":"328580163","job_positions":"Senior Python Developer","is_rehired":["false","true"],"salary_change":"13.61"}
-{ "index": {}}
-{"birth_date":"1959-07-23T00:00:00Z","emp_no":"10087","first_name":"Xinglin","gender":"F","hire_date":"1986-09-08T00:00:00Z","languages":"5","last_name":"Eugenio","salary":"32272","height":"1.74","still_hired":"true","avg_worked_seconds":"305782871","job_positions":["Junior Developer","Internship"],"is_rehired":["false","false"],"salary_change":"-2.05"}
-{ "index": {}}
-{"birth_date":"1954-02-25T00:00:00Z","emp_no":"10088","first_name":"Jungsoon","gender":"F","hire_date":"1988-09-02T00:00:00Z","languages":"5","last_name":"Syrzycki","salary":"39638","height":"1.91","still_hired":"false","avg_worked_seconds":"330714423","job_positions":["Reporting Analyst","Business Analyst","Tech Lead"],"is_rehired":"true"}
-{ "index": {}}
-{"birth_date":"1963-03-21T00:00:00Z","emp_no":"10089","first_name":"Sudharsan","gender":"F","hire_date":"1986-08-12T00:00:00Z","languages":"4","last_name":"Flasterstein","salary":"43602","height":"1.57","still_hired":"true","avg_worked_seconds":"232951673","job_positions":["Junior Developer","Accountant"],"is_rehired":["true","false","false","false"]}
-{ "index": {}}
-{"birth_date":"1961-05-30T00:00:00Z","emp_no":"10090","first_name":"Kendra","gender":"M","hire_date":"1986-03-14T00:00:00Z","languages":"2","last_name":"Hofting","salary":"44956","height":"2.03","still_hired":"true","avg_worked_seconds":"212460105","is_rehired":["false","false","false","true"],"salary_change":["7.15","-1.85","3.60"]}
-{ "index": {}}
-{"birth_date":"1955-10-04T00:00:00Z","emp_no":"10091","first_name":"Amabile","gender":"M","hire_date":"1992-11-18T00:00:00Z","languages":"3","last_name":"Gomatam","salary":"38645","height":"2.09","still_hired":"true","avg_worked_seconds":"242582807","job_positions":["Reporting Analyst","Python Developer"],"is_rehired":["true","true","false","false"],"salary_change":["-9.23","7.50","5.85","5.19"]}
-{ "index": {}}
-{"birth_date":"1964-10-18T00:00:00Z","emp_no":"10092","first_name":"Valdiodio","gender":"F","hire_date":"1989-09-22T00:00:00Z","languages":"1","last_name":"Niizuma","salary":"25976","height":"1.75","still_hired":"false","avg_worked_seconds":"313407352","job_positions":["Junior Developer","Accountant"],"is_rehired":["false","false","true","true"],"salary_change":["8.78","0.39","-6.77","8.30"]}
-{ "index": {}}
-{"birth_date":"1964-06-11T00:00:00Z","emp_no":"10093","first_name":"Sailaja","gender":"M","hire_date":"1996-11-05T00:00:00Z","languages":"3","last_name":"Desikan","salary":"45656","height":"1.69","still_hired":"false","avg_worked_seconds":"315904921","job_positions":["Reporting Analyst","Tech Lead","Principal Support Engineer","Purchase Manager"],"salary_change":"-0.88"}
-{ "index": {}}
-{"birth_date":"1957-05-25T00:00:00Z","emp_no":"10094","first_name":"Arumugam","gender":"F","hire_date":"1987-04-18T00:00:00Z","languages":"5","last_name":"Ossenbruggen","salary":"66817","height":"2.10","still_hired":"false","avg_worked_seconds":"332920135","job_positions":["Senior Python Developer","Principal Support Engineer","Accountant"],"is_rehired":["true","false","true"],"salary_change":["2.22","7.92"]}
-{ "index": {}}
-{"birth_date":"1965-01-03T00:00:00Z","emp_no":"10095","first_name":"Hilari","gender":"M","hire_date":"1986-07-15T00:00:00Z","languages":"4","last_name":"Morton","salary":"37702","height":"1.55","still_hired":"false","avg_worked_seconds":"321850475","is_rehired":["true","true","false","false"],"salary_change":["-3.93","-6.66"]}
-{ "index": {}}
-{"birth_date":"1954-09-16T00:00:00Z","emp_no":"10096","first_name":"Jayson","gender":"M","hire_date":"1990-01-14T00:00:00Z","languages":"4","last_name":"Mandell","salary":"43889","height":"1.94","still_hired":"false","avg_worked_seconds":"204381503","job_positions":["Architect","Reporting Analyst"],"is_rehired":["false","false","false"]}
-{ "index": {}}
-{"birth_date":"1952-02-27T00:00:00Z","emp_no":"10097","first_name":"Remzi","gender":"M","hire_date":"1990-09-15T00:00:00Z","languages":"3","last_name":"Waschkowski","salary":"71165","height":"1.53","still_hired":"false","avg_worked_seconds":"206258084","job_positions":["Reporting Analyst","Tech Lead"],"is_rehired":["true","false"],"salary_change":"-1.12"}
-{ "index": {}}
-{"birth_date":"1961-09-23T00:00:00Z","emp_no":"10098","first_name":"Sreekrishna","gender":"F","hire_date":"1985-05-13T00:00:00Z","languages":"4","last_name":"Servieres","salary":"44817","height":"2.00","still_hired":"false","avg_worked_seconds":"272392146","job_positions":["Architect","Internship","Senior Team Lead"],"is_rehired":"false","salary_change":["-2.83","8.31","4.38"]}
-{ "index": {}}
-{"birth_date":"1956-05-25T00:00:00Z","emp_no":"10099","first_name":"Valter","gender":"F","hire_date":"1988-10-18T00:00:00Z","languages":"2","last_name":"Sullins","salary":"73578","height":"1.81","still_hired":"true","avg_worked_seconds":"377713748","is_rehired":["true","true"],"salary_change":["10.71","14.26","-8.78","-3.98"]}
-{ "index": {}}
-{"birth_date":"1953-04-21T00:00:00Z","emp_no":"10100","first_name":"Hironobu","gender":"F","hire_date":"1987-09-21T00:00:00Z","languages":"4","last_name":"Haraldson","salary":"68431","height":"1.77","still_hired":"true","avg_worked_seconds":"223910853","job_positions":"Purchase Manager","is_rehired":["false","true","true","false"],"salary_change":["13.97","-7.49"]}
---------------------------------------------------
-// TEST[skip:TBD]
-====
-
-[discrete]
-[[convert-dataset-pandas-dataframe]]
-==== Convert the dataset
-
-Use the CSV output of ES|QL to convert the `employees` dataset into a Pandas
-dataframe object.
-
-[source,python]
-------------------------------------
-from io import StringIO
-from elasticsearch import Elasticsearch
-import pandas as pd
-client = Elasticsearch(
-    "https://[host].elastic-cloud.com",
-    api_key="...",
-)
-response = client.esql.query(
-    query="FROM employees | LIMIT 500",
-    format="csv",
-)
-df = pd.read_csv(StringIO(response.body))
-print(df)
-------------------------------------
-
-Even though the dataset contains only 100 records, a LIMIT of 500 is specified to suppress
-ES|QL warnings about potentially missing records. This prints the
-following dataframe:
-
-[source,python]
-------------------------------------
-    avg_worked_seconds  ...  salary_change.long still_hired
-0            268728049  ...                   1        True
-1            328922887  ...            [-7, 11]        True
-2            200296405  ...            [12, 14]       False
-3            311267831  ...       [0, 1, 3, 13]        True
-4            244294991  ...            [-2, 13]        True
-..                 ...  ...                 ...         ...
-95           204381503  ...                 NaN       False
-96           206258084  ...                  -1       False
-97           272392146  ...          [-2, 4, 8]       False
-98           377713748  ...    [-8, -3, 10, 14]        True
-99           223910853  ...            [-7, 13]        True
-------------------------------------
-
-You can now analyze the data with Pandas, or you can continue transforming
-the data using ES|QL.
-
-
-[discrete]
-[[analyze-data]]
-==== Analyze the data with Pandas
-
-In the next example, the {ref}/esql-commands.html#esql-stats-by[STATS ... BY]
-command is used to count how many employees speak a given language.
-The results are sorted by the `languages` column using
-{ref}/esql-commands.html#esql-sort[SORT]:
-
-[source,python]
-------------------------------------
-response = client.esql.query(
-    query="""
-    FROM employees
-    | STATS count = COUNT(emp_no) BY languages
-    | SORT languages
-    | LIMIT 500
-    """,
-    format="csv",
-)
-df = pd.read_csv(
-    StringIO(response.body),
-    dtype={"count": "Int64", "languages": "Int64"},
-)
-print(df)
-------------------------------------
-
-Note that the `dtype` parameter of `pd.read_csv()` is useful when the type
-inferred by Pandas is not precise enough. The code prints the following response:
-
-[source,python]
-------------------------------------
-   count  languages
-0     15          1
-1     19          2
-2     17          3
-3     18          4
-4     21          5
-------------------------------------
-
-
-[discrete]
-[[passing-params]]
-==== Pass parameters to a query with ES|QL
-
-Use the 
-{ref}/esql-rest.html#esql-rest-params[built-in parameters support of the ES|QL REST API]
-to pass parameters to a query:
-
-[source,python]
-------------------------------------
-response = client.esql.query(
-    query="""
-    FROM employees
-    | STATS count = COUNT(emp_no) BY languages
-    | WHERE languages >= (?)
-    | SORT languages
-    | LIMIT 500
-    """,
-    format="csv",
-    params=[3],
-)
-df = pd.read_csv(
-    StringIO(response.body),
-    dtype={"count": "Int64", "languages": "Int64"},
-)
-print(df)
-------------------------------------
-
-The code above outputs the following:
-
-[source,python]
-------------------------------------
-   count  languages
-0     17          3
-1     18          4
-2     21          5
-------------------------------------
-
-If you want to learn more about ES|QL, refer to the
-{ref}/esql.html[ES|QL documentation]. You can also check out this other 
-https://github.com/elastic/elasticsearch-labs/blob/main/supporting-blog-content/Boston-Celtics-Demo/celtics-esql-demo.ipynb[Python example using Boston Celtics data].
\ No newline at end of file
diff -pruN 8.17.2-2/docs/guide/examples.asciidoc 9.2.0-1/docs/guide/examples.asciidoc
--- 8.17.2-2/docs/guide/examples.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/examples.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,111 +0,0 @@
-[[examples]]
-== Examples
-
-Below you can find examples of how to use the most frequently called APIs with 
-the Python client.
-
-* <<ex-index>>
-* <<ex-get>>
-* <<ex-refresh>>
-* <<ex-search>>
-* <<ex-update>>
-* <<ex-delete>>
-
-[discrete]
-[[ex-index]]
-=== Indexing a document
-  
-To index a document, you need to specify three pieces of information: `index`, 
-`id`, and a `document`:
-
-[source,py]
-----------------------------
-from datetime import datetime
-from elasticsearch import Elasticsearch
-client = Elasticsearch('https://localhost:9200')
-
-doc = {
-    'author': 'author_name',
-    'text': 'Interesting content...',
-    'timestamp': datetime.now(),
-}
-resp = client.index(index="test-index", id=1, document=doc)
-print(resp['result'])
-----------------------------
-
-
-[discrete]
-[[ex-get]]
-=== Getting a document 
-
-To get a document, you need to specify its `index` and `id`:
-
-[source,py]
-----------------------------
-resp = client.get(index="test-index", id=1)
-print(resp['_source'])
-----------------------------
-
-
-[discrete]
-[[ex-refresh]]
-=== Refreshing an index
-
-You can perform the refresh operation on an index:
-
-[source,py]
-----------------------------
-client.indices.refresh(index="test-index")
-----------------------------
-
-
-[discrete]
-[[ex-search]]
-=== Searching for a document
-
-The `search()` method returns results that match a query:
-
-[source,py]
-----------------------------
-resp = client.search(index="test-index", query={"match_all": {}})
-print("Got %d Hits:" % resp['hits']['total']['value'])
-for hit in resp['hits']['hits']:
-    print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
-----------------------------
-
-
-[discrete]
-[[ex-update]]
-=== Updating a document
-
-To update a document, you need to specify three pieces of information: `index`, 
-`id`, and a `doc`:
-
-[source,py]
-----------------------------
-from datetime import datetime
-from elasticsearch import Elasticsearch
-
-client = Elasticsearch('https://localhost:9200')
-
-doc = {
-    'author': 'author_name',
-    'text': 'Interesting modified content...',
-    'timestamp': datetime.now(),
-}
-resp = client.update(index="test-index", id=1, doc=doc)
-print(resp['result'])
-----------------------------
-
-
-[discrete]
-[[ex-delete]]
-=== Deleting a document
-
-You can delete a document by specifying its `index` and `id` in the `delete()`
-method:
-
-[source,py]
-----------------------------
-client.delete(index="test-index", id=1)
-----------------------------
diff -pruN 8.17.2-2/docs/guide/getting-started.asciidoc 9.2.0-1/docs/guide/getting-started.asciidoc
--- 8.17.2-2/docs/guide/getting-started.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/getting-started.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,153 +0,0 @@
-[[getting-started-python]]
-== Getting started
-
-This page guides you through the installation process of the Python client,
-shows you how to instantiate the client, and explains how to perform basic
-Elasticsearch operations with it.
-
-[discrete]
-=== Requirements
-
-* https://www.python.org/[Python] 3.8 or newer
-* https://pip.pypa.io/en/stable/[`pip`], installed by default alongside Python
-
-[discrete]
-=== Installation 
-
-To install the latest version of the client, run the following command:
-
-[source,shell]
---------------------------
-python -m pip install elasticsearch
---------------------------
-
-Refer to the <<installation>> page to learn more.
-
-
-[discrete]
-=== Connecting
-
-You can connect to Elastic Cloud using an API key and the Elasticsearch
-endpoint.
-
-[source,py]
-----
-from elasticsearch import Elasticsearch
-
-client = Elasticsearch(
-    "https://...",  # Elasticsearch endpoint
-    api_key="api_key",
-)
-----
-
-Your Elasticsearch endpoint can be found on the **My deployment** page of your 
-deployment:
-
-image::images/es-endpoint.jpg[alt="Finding Elasticsearch endpoint",align="center"]
-
-You can generate an API key on the **Management** page under Security.
-
-image::images/create-api-key.png[alt="Create API key",align="center"]
-
-For other connection options, refer to the <<connecting>> section.
-
-
-[discrete]
-=== Operations
-
-Time to use Elasticsearch! This section walks you through the most important
-basic operations of Elasticsearch. For more operations and more advanced
-examples, refer to the <<examples>> page.
-
-
-[discrete]
-==== Creating an index
-
-This is how you create the `my_index` index:
-
-[source,py]
-----
-client.indices.create(index="my_index")
-----
-
-
-[discrete]
-==== Indexing documents
-
-This is a simple way of indexing a document:
-
-[source,py]
-----
-client.index(
-    index="my_index",
-    id="my_document_id",
-    document={
-        "foo": "foo",
-        "bar": "bar",
-    }
-)
-----
-
-
-[discrete]
-==== Getting documents
-
-You can get documents by using the following code:
-
-[source,py]
-----
-client.get(index="my_index", id="my_document_id")
-----
-
-
-[discrete]
-==== Searching documents
-
-This is how you can create a single match query with the Python client: 
-
-[source,py]
-----
-client.search(index="my_index", query={
-    "match": {
-        "foo": "foo"
-    }
-})
-----
-
-
-[discrete]
-==== Updating documents
-
-This is how you can update a document, for example to add a new field:
-
-[source,py]
-----
-client.update(index="my_index", id="my_document_id", doc={
-    "foo": "bar",
-    "new_field": "new value",
-})
-----
-
-
-[discrete]
-==== Deleting documents
-
-[source,py]
-----
-client.delete(index="my_index", id="my_document_id")
-----
-
-
-[discrete]
-==== Deleting an index
-
-[source,py]
-----
-client.indices.delete(index="my_index")
-----
-
-
-[discrete]
-== Further reading
-
-* Use <<client-helpers>> for a more comfortable experience with the APIs.
diff -pruN 8.17.2-2/docs/guide/helpers.asciidoc 9.2.0-1/docs/guide/helpers.asciidoc
--- 8.17.2-2/docs/guide/helpers.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/helpers.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,90 +0,0 @@
-[[client-helpers]]
-== Client helpers
-
-Here you can find a collection of simple helper functions that abstract away
-some specifics of the raw API. For detailed examples, refer to
-https://elasticsearch-py.readthedocs.io/en/stable/helpers.html[this page].
-
-
-[discrete]
-[[bulk-helpers]]
-=== Bulk helpers 
-
-There are several helpers for the bulk API, since its requirements for specific
-formatting and other considerations can make it cumbersome to use directly.
-
-All bulk helpers accept an instance of the `{es}` class and an iterable of
-`actions` (any iterable; a generator is ideal in most cases since it lets you
-index large datasets without loading them into memory).
-
-The items in the `actions` iterable are the documents to index, and they can
-take several formats. The most common one is the same format as returned by
-`search()`, for example:
-
-[source,yml]
-----------------------------
-{
-  '_index': 'index-name',
-  '_id': 42,
-  '_routing': 5,
-  'pipeline': 'my-ingest-pipeline',
-  '_source': {
-    "title": "Hello World!",
-    "body": "..."
-  }
-}
-----------------------------
-
-Alternatively, if `_source` is not present, the helper pops all metadata fields
-from the doc and uses the rest as the document data:
-
-[source,yml]
-----------------------------
-{
-  "_id": 42,
-  "_routing": 5,
-  "title": "Hello World!",
-  "body": "..."
-}
-----------------------------
-
-The `bulk()` API accepts `index`, `create`, `delete`, and `update` actions. Use
-the `_op_type` field to specify an action (`_op_type` defaults to `index`):
-
-[source,yml]
-----------------------------
-{
-  '_op_type': 'delete',
-  '_index': 'index-name',
-  '_id': 42,
-}
-{
-  '_op_type': 'update',
-  '_index': 'index-name',
-  '_id': 42,
-  'doc': {'question': 'The life, universe and everything.'}
-}
-----------------------------
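-
-As an illustration, here is a minimal sketch (not part of the original action formats above; the index name and document contents are placeholders) of feeding such actions to the `bulk()` helper from a generator:
-
-[source,py]
-----------------------------
-from elasticsearch import Elasticsearch, helpers
-
-client = Elasticsearch("http://localhost:9200", api_key="api_key")
-
-def generate_actions():
-    # Yield one action dict per document; a generator avoids loading
-    # the whole dataset into memory at once.
-    for i in range(100):
-        yield {
-            "_index": "index-name",
-            "_id": i,
-            "title": f"Document {i}",
-        }
-
-# bulk() consumes the iterable and returns a (successes, errors) tuple.
-successes, errors = helpers.bulk(client, generate_actions())
-----------------------------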
-
-
-[discrete]
-[[scan]]
-=== Scan
-
-A simple abstraction on top of the `scroll()` API: an iterator that yields
-all hits as returned by the underlying scroll requests.
-
-By default scan does not return results in any pre-determined order. To have a 
-standard order in the returned documents (either by score or explicit sort 
-definition) when scrolling, use `preserve_order=True`. This may be an expensive 
-operation and will negate the performance benefits of using `scan`.
-
-
-[source,py]
-----------------------------
-from elasticsearch.helpers import scan
-
-# scan() returns a generator, so iterate over it to consume the hits
-for hit in scan(es,
-    query={"query": {"match": {"title": "python"}}},
-    index="orders-*"
-):
-    print(hit["_source"])
-----------------------------
\ No newline at end of file
Binary files 8.17.2-2/docs/guide/images/create-api-key.png and 9.2.0-1/docs/guide/images/create-api-key.png differ
Binary files 8.17.2-2/docs/guide/images/es-endpoint.jpg and 9.2.0-1/docs/guide/images/es-endpoint.jpg differ
Binary files 8.17.2-2/docs/guide/images/otel-waterfall-retry.png and 9.2.0-1/docs/guide/images/otel-waterfall-retry.png differ
Binary files 8.17.2-2/docs/guide/images/otel-waterfall-with-http.png and 9.2.0-1/docs/guide/images/otel-waterfall-with-http.png differ
Binary files 8.17.2-2/docs/guide/images/otel-waterfall-without-http.png and 9.2.0-1/docs/guide/images/otel-waterfall-without-http.png differ
diff -pruN 8.17.2-2/docs/guide/index-custom-title-page.html 9.2.0-1/docs/guide/index-custom-title-page.html
--- 8.17.2-2/docs/guide/index-custom-title-page.html	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/index-custom-title-page.html	2025-10-28 16:50:53.000000000 +0000
@@ -95,6 +95,9 @@
       <li>
         <a href="config.html">Configuring the client</a>
       </li>
+      <li>
+        <a href="async.html">Using the client with asyncio</a>
+      </li>
     </ul>
   </div>
 
@@ -136,6 +139,9 @@
         <a href="integrations.html">Integrations</a>
       </li>
       <li>
+        <a href="elasticsearch-dsl.html">Python DSL</a>
+      </li>
+      <li>
         <a href="client-helpers.html">Client helpers</a>
       </li>
       <li>
@@ -186,4 +192,4 @@
     </div>
   </div>
 
-  <p class="my-4"><a href="https://www.elastic.co/guide/index.html">View all Elastic docs</a></p>
\ No newline at end of file
+  <p class="my-4"><a href="https://www.elastic.co/guide/index.html">View all Elastic docs</a></p>
diff -pruN 8.17.2-2/docs/guide/index.asciidoc 9.2.0-1/docs/guide/index.asciidoc
--- 8.17.2-2/docs/guide/index.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/index.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-= Elasticsearch Python Client
-
-:doctype:           book
-
-include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
-
-include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
-
-include::overview.asciidoc[]
-
-include::getting-started.asciidoc[]
-
-include::installation.asciidoc[]
-
-include::connecting.asciidoc[]
-
-include::configuration.asciidoc[]
-
-include::migration.asciidoc[]
-
-include::integrations.asciidoc[]
-
-include::examples.asciidoc[]
-
-include::helpers.asciidoc[]
-
-include::release-notes.asciidoc[]
\ No newline at end of file
diff -pruN 8.17.2-2/docs/guide/installation.asciidoc 9.2.0-1/docs/guide/installation.asciidoc
--- 8.17.2-2/docs/guide/installation.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/installation.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,25 +0,0 @@
-[[installation]]
-== Installation
-
-**https://www.elastic.co/downloads/elasticsearch[Download the latest version of Elasticsearch]**
-or
-**https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page[sign up]**
-**for a free trial of Elastic Cloud**.
-
-The Python client for {es} can be installed with pip:
-
-[source,sh]
--------------------------------------
-$ python -m pip install elasticsearch
--------------------------------------
-
-If your application uses async/await in Python, you can install the client with
-the `async` extra:
-
-[source,sh]
---------------------------------------------
-$ python -m pip install elasticsearch[async]
---------------------------------------------
-
-Read more about 
-https://elasticsearch-py.readthedocs.io/en/master/async.html[how to use asyncio with this project].
diff -pruN 8.17.2-2/docs/guide/integrations.asciidoc 9.2.0-1/docs/guide/integrations.asciidoc
--- 8.17.2-2/docs/guide/integrations.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/integrations.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,75 +0,0 @@
-[[integrations]]
-== Integrations
-
-You can find integration options and information on this page.
-
-
-[discrete]
-[[opentelemetry-intro]]
-=== OpenTelemetry instrumentation
-
-The Python Elasticsearch client supports native OpenTelemetry instrumentation following the https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/[OpenTelemetry Semantic Conventions for Elasticsearch].
-Refer to the <<opentelemetry>> page for details.
-
-
-[discrete]
-[[esql-intro]]
-=== ES|QL
-
-{ref}/esql.html[ES|QL] is available through the Python Elasticsearch client.
-Refer to the <<esql-pandas>> page to learn more about using ES|QL and Pandas together with dataframes.
-
-
-[discrete]
-[[transport]]
-=== Transport
-
-Connection handling, retries, and pooling are managed by the https://github.com/elastic/elastic-transport-python[Elastic Transport Python] library.
-Documentation on the low-level classes is available on https://elastic-transport-python.readthedocs.io[Read the Docs].
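-
-As a rough illustration (the values below are arbitrary examples, not recommendations from this guide), retry behavior can be tuned through client options that are forwarded to the transport layer:
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-
-client = Elasticsearch(
-    "http://localhost:9200",
-    api_key="api_key",
-    request_timeout=10,     # per-request timeout in seconds
-    max_retries=3,          # retry a failed request up to 3 times
-    retry_on_timeout=True,  # also retry requests that time out
-)
-------------------------------------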
-
-
-[discrete]
-[[opaque-id]]
-=== Tracking requests with Opaque ID
-
-You can enrich your requests against Elasticsearch with an identifier string that you can later discover in https://www.elastic.co/guide/en/elasticsearch/reference/current/logging.html#deprecation-logging[deprecation logs], use when https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin],
-or use to help with https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html#_identifying_running_tasks[identifying running tasks].
-
-The opaque ID can be set with the `opaque_id` parameter of the client's `.options()` method:
-
-[source,python]
-------------------------------------
-client = Elasticsearch(...)
-client.options(opaque_id="request-id-...").search(...)
-------------------------------------
-
-
-[discrete]
-[[type-hints]]
-=== Type Hints
-
-Starting in `elasticsearch-py` v7.10.0 the library ships with https://www.python.org/dev/peps/pep-0484[type hints] and supports basic static type analysis with tools like http://mypy-lang.org[Mypy] and https://github.com/microsoft/pyright[Pyright].
-
-If we write a script that has a type error, such as passing a `str` argument to `request_timeout` instead of a `float`, and then run Mypy on the script:
-
-[source,python]
-------------------------------------
-# script.py
-from elasticsearch import Elasticsearch
-
-client = Elasticsearch(...)
-client.options(
-    request_timeout="5"  # type error!
-).search(...)
-
-# $ mypy script.py
-# script.py:5: error: Argument "request_timeout" to "search" of "Elasticsearch" has 
-#                     incompatible type "str"; expected "Union[int, float, None]"
-# Found 1 error in 1 file (checked 1 source file)
-------------------------------------
-
-Type hints also allow tools like your IDE to check types and provide better auto-complete functionality.
-
-
-include::open-telemetry.asciidoc[]
-include::esql-pandas.asciidoc[]
\ No newline at end of file
diff -pruN 8.17.2-2/docs/guide/migration.asciidoc 9.2.0-1/docs/guide/migration.asciidoc
--- 8.17.2-2/docs/guide/migration.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/migration.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,377 +0,0 @@
-[[migration]]
-== Migrating to 8.0
-
-The 8.0 client has major changes that require updates to how you use it.
-The sections below outline all the changes you'll have to take into account
-when upgrading from 7.x to 8.0.
-
-* <<migration-compat-mode>>
-* <<migration-upgrade-client>>
-* <<migration-remove-deprecations>>
-* <<migration-strict-client-config>>
-* <<migration-keyword-only-args>>
-* <<migration-options>>
-* <<migration-response-types>>
-* <<migration-error-types>>
-
-[discrete]
-[[migration-compat-mode]]
-=== Enable compatibility mode and upgrade Elasticsearch
-
-Upgrade your Elasticsearch client to 7.16:
-
-[source,bash]
-------------------------------------
-$ python -m pip install --upgrade 'elasticsearch>=7.16,<8'
-------------------------------------
-
-If you have an existing application, enable compatibility mode
-by setting the `ELASTIC_CLIENT_APIVERSIONING=1` environment variable.
-This instructs the Elasticsearch server to accept and respond
-with 7.x-compatible requests and responses.
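-
-For example, a minimal sketch of enabling compatibility mode for the current shell session (assuming a Unix-like shell; adapt this to however your application's environment is configured):
-
-[source,bash]
-------------------------------------
-$ export ELASTIC_CLIENT_APIVERSIONING=1
-------------------------------------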
-
-After you've done this you can https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html[upgrade the Elasticsearch server to 8.0.0].
-
-[discrete]
-[[migration-upgrade-client]]
-=== Upgrading the client
-
-After you've deployed your application with the 7.16 client against
-an 8.0.0 Elasticsearch server, you can upgrade your client to 8.0.
-
-[source,bash]
-------------------------------------
-$ python -m pip install --upgrade 'elasticsearch>=8,<9'
-------------------------------------
-
-[discrete]
-[[migration-remove-deprecations]]
-=== Removing deprecation warnings
-
-After upgrading the client to 8.0 you'll likely notice that your code
-either raises errors or emits `DeprecationWarning` to signal where you need
-to change your code before using the 8.0 client.
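-
-As a minimal sketch using only the standard library, you can turn these warnings into errors while testing so that each remaining usage is surfaced:
-
-[source,python]
-------------------------------------
-import warnings
-
-# Turn DeprecationWarning into an error during testing so each usage that
-# still needs migrating fails loudly instead of only printing a warning.
-warnings.simplefilter("error", DeprecationWarning)
-------------------------------------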
-
-
-[discrete]
-[[migration-strict-client-config]]
-==== Strict client configuration
-
-Previously the client would use `scheme="http"`, `host="localhost"`, and `port=9200` defaults
-when specifying which node(s) to connect to. Starting in 8.0 these defaults have been removed;
-you must now explicitly configure the scheme, host, and port, or configure the client
-with `cloud_id`, to avoid confusion about which Elasticsearch instance is being connected to.
-
-This choice was made because starting in 8.0.0 Elasticsearch enables HTTPS by default, so it's no
-longer a safe assumption that `http://localhost:9200` is the locally running cluster.
-
-See documentation on <<connecting, connecting to Elasticsearch>> and <<tls-and-ssl, configuring HTTPS>>.
-
-For quick examples, using a configuration like one of the two below works best:
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-
-# If you're connecting to an instance on Elastic Cloud:
-client = Elasticsearch(
-    cloud_id="cluster-1:dXMa5Fx...",
-
-    # Include your authentication like 'api_key'
-    # 'basic_auth', or 'bearer_auth' here.
-    basic_auth=("elastic", "<password>")
-)
-
-# If you're connecting to an instance hosted elsewhere:
-client = Elasticsearch(
-    # Notice that the scheme (http://), host (localhost),
-    # and port (9200) are explicit here:
-    "http://localhost:9200",
-
-    # Include your authentication like 'api_key'
-    # 'basic_auth', or 'bearer_auth' here:
-    api_key="api_key"
-)
-------------------------------------
-
-[discrete]
-[[migration-keyword-only-args]]
-==== Keyword-only arguments for APIs
-
-APIs used to support both positional and keyword arguments, however
-using **keyword-only arguments was always recommended** in the documentation.
-Starting in 7.14 using positional arguments would raise a `DeprecationWarning` but would still work.
-
-Starting in 8.0, keyword-only arguments are required for APIs for better forwards-compatibility
-with new API options. Attempting to use positional arguments raises a `TypeError`.
-
-[source,python]
-------------------------------------
-# 8.0+ SUPPORTED USAGE:
-client.indices.get(index="*")
-
-# 7.x UNSUPPORTED USAGE (Don't do this!):
-client.indices.get("*")
-------------------------------------
-
-[discrete]
-[[migration-options]]
-==== Start using .options() for transport parameters
-
-Previously some per-request options like `api_key` and `ignore` could be passed directly to
-client API methods. Starting in 8.0 this is deprecated for all APIs, and for a small
-number of APIs it may break in unexpected ways if not changed.
-
-The parameters `headers`, `api_key`, `http_auth`, `opaque_id`, `request_timeout`, and `ignore`
-are affected:
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-
-client = Elasticsearch("http://localhost:9200")
-
-# 8.0+ SUPPORTED USAGE:
-client.options(api_key="api_key").search(index="blogs")
-
-# 7.x DEPRECATED USAGE (Don't do this!):
-client.search(index="blogs", api_key=("id", "api_key"))
-------------------------------------
-
-Some of these parameters have been renamed to be more readable and to fit other APIs.
-`ignore` should be `ignore_status` and `http_auth` should be `basic_auth`:
-
-[source,python]
-------------------------------------
-# 8.0+ SUPPORTED USAGES:
-client.options(basic_auth=("username", "password")).search(...)
-client.options(ignore_status=404).indices.delete(index=...)
-
-# 7.x DEPRECATED USAGES (Don't do this!):
-client.search(http_auth=("username", "password"), ...)
-client.indices.delete(index=..., ignore=404)
-------------------------------------
-
-The following APIs are affected by this breaking change, which has no deprecation period due to conflicts
-between the client API and Elasticsearch's API:
-
-- `sql.query` using `request_timeout`
-- `security.grant_api_key` using `api_key`
-- `render_search_template` using `params`
-- `search_template` using `params`
-
-You should immediately evaluate the usage of these parameters and start using `.options(...)`
-to avoid unexpected behavior. Below is an example of migrating away from using per-request `api_key`
-with the `security.grant_api_key` API:
-
-[source,python]
-------------------------------------
-# 8.0+ SUPPORTED USAGE:
-resp = (
-    client.options(
-        # This is the API key being used for the request
-        api_key="request-api-key"
-    ).security.grant_api_key(
-        # This is the API key being granted
-        api_key={
-            "name": "granted-api-key"
-        },
-        grant_type="password",
-        username="elastic",
-        password="changeme"
-    )
-)
-
-# 7.x DEPRECATED USAGE (Don't do this!):
-resp = (
-    # This is the API key being used for the request
-    client.security.grant_api_key(
-        api_key=("request-id", "request-api-key"),
-        body={
-            # This is the API key being granted
-            "api_key": {
-                "name": "granted-api-key"
-            },
-            "grant_type": "password",
-            "username": "elastic",
-            "password": "changeme"
-        }
-    )
-)
-------------------------------------
-
-Starting with the 8.12 client, using a body parameter is fully supported again, meaning you can also use `grant_api_key` like this:
-
-[source,python]
-------------------------------------
-# 8.12+ SUPPORTED USAGE:
-resp = (
-    client.options(
-        # This is the API key being used for the request
-        api_key="request-api-key"
-    ).security.grant_api_key(
-        body={
-            # This is the API key being granted
-            "api_key": {
-                "name": "granted-api-key"
-            },
-            "grant_type": "password",
-            "username": "elastic",
-            "password": "changeme"
-        }
-    )
-)
-------------------------------------
-
-[discrete]
-[[migration-response-types]]
-==== Changes to API responses
-
-In 7.x and earlier the return value of API methods was the raw deserialized response body.
-This meant that there was no way to access HTTP status codes, headers, or other information
-from the transport layer.
-
-In 8.0.0 responses are no longer the raw deserialized response body but instead an object
-with two properties, `meta` and `body`. Transport layer metadata about the response,
-like HTTP status, headers, version, and which node serviced the request, is available here:
-
-[source,python]
-------------------------------------
->>> resp = client.search(...)
-
-# Response is no longer a 'dict'
->>> resp
-ObjectApiResponse({'took': 1, 'timed_out': False, ...})
-
-# But can still be used like one:
->>> resp["hits"]["total"]
-{'value': 5500, 'relation': 'eq'}
-
->>> resp.keys()
-dict_keys(['took', 'timed_out', '_shards', 'hits'])
-
-# HTTP status
->>> resp.meta.status
-200
-
-# HTTP headers
->>> resp.meta.headers['content-type']
-'application/json'
-
-# HTTP version
->>> resp.meta.http_version
-'1.1'
-------------------------------------
-
-Because the response is no longer a dictionary, list, `str`, or `bytes` instance,
-calling `isinstance()` on the response object will return `False`. If you need
-direct access to the underlying deserialized response body you can use the `body`
-property:
-
-[source,python]
-------------------------------------
->>> resp.body
-{'took': 1, 'timed_out': False, ...}
-
-# The response isn't a dict, but resp.body is.
->>> isinstance(resp, dict)
-False
-
->>> isinstance(resp.body, dict)
-True
-------------------------------------
-
-Responses from requests that use the `HEAD` HTTP method can still be used in `if` conditions but won't work with `is` comparisons.
-
-[source,python]
-------------------------------------
->>> resp = client.indices.exists(index=...)
->>> resp.body
-True
-
->>> resp is True
-False
-
->>> resp.body is True
-True
-
->>> isinstance(resp, bool)
-False
-
->>> isinstance(resp.body, bool)
-True
-------------------------------------
-
-[discrete]
-[[migration-error-types]]
-==== Changes to error classes
-
-Previously `elasticsearch.TransportError` was the base class for both transport layer errors (like timeouts, connection errors) and API layer errors (like "404 Not Found" when accessing an index). This was pretty confusing when you wanted to capture API errors to inspect them for a response body and not capture errors from the transport layer.
-
-Now in 8.0 `elasticsearch.TransportError` is a redefinition of `elastic_transport.TransportError` and will only be the base class for true transport layer errors. If you instead want to capture API layer errors you can use the new `elasticsearch.ApiError` base class.
-
-[source,python]
-------------------------------------
-from elasticsearch import TransportError, Elasticsearch
-
-try:
-    client.indices.get(index="index-that-does-not-exist")
-
-# In elasticsearch-py v7.x this would capture the resulting
-# 'NotFoundError' that would be raised above. But in 8.0.0 this
-# 'except TransportError' won't capture 'NotFoundError'.
-except TransportError as err:
-    print(f"TransportError: {err}")
-------------------------------------
-
-The `elasticsearch.ElasticsearchException` base class has been removed as well. If you'd like to capture all errors that can be raised from the library you can capture both `elasticsearch.ApiError` and `elasticsearch.TransportError`:
-
-[source,python]
-------------------------------------
-from elasticsearch import TransportError, ApiError, Elasticsearch
-
-try:
-    client.search(...)
-# This is the 'except' clause you should use if you *actually* want to
-# capture both Transport errors and API errors in one clause:
-except (ApiError, TransportError) as err:
-    ...
-
-# However, I recommend you instead split each error into its own 'except'
-# clause so you can have different behavior for TransportErrors. This
-# construction wasn't possible in 7.x and earlier.
-try:
-    client.search(...)
-except ApiError as err:
-    ... # API errors handled here
-except TransportError as err:
-    ... # Transport errors handled here
-------------------------------------
-
-`elasticsearch.helpers.errors.BulkIndexError` and `elasticsearch.helpers.errors.ScanError` now use `Exception` as a base class instead of `ElasticsearchException`.
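-
-For example, a minimal sketch of catching `BulkIndexError` from the `bulk` helper (the index name and document are illustrative):
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-from elasticsearch.helpers import BulkIndexError, bulk
-
-client = Elasticsearch("http://localhost:9200")
-
-try:
-    bulk(client, [{"_index": "my-index-000001", "field": "value"}])
-except BulkIndexError as err:
-    # BulkIndexError now derives from Exception, and err.errors holds
-    # the per-document failures reported by Elasticsearch.
-    print(f"{len(err.errors)} document(s) failed to index")
-------------------------------------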
-
-Another difference between 7.x and 8.0 errors is their properties. Previously there were `status_code`, `info`, and `error` properties that weren't very useful, as their value types varied depending on what the error was and which layer it was raised from (transport versus API). Now you can inspect an `ApiError` instance to get the response metadata via `meta` and the response body via `body`:
-
-[source,python]
-------------------------------------
-from elasticsearch import ApiError, Elasticsearch
-
-try:
-    client.indices.get(index="index-that-does-not-exist")
-except ApiError as err:
-    print(err.meta.status)
-    # 404
-    print(err.meta.headers)
-    # {'content-length': '200', ...}
-    print(err.body)
-    # {
-    #   'error': {
-    #     'type': 'index_not_found_exception',
-    #     'reason': 'no such index',
-    #     'resource.type': 'index_or_alias',
-    #     ...
-    #   },
-    #   'status': 404
-    # }
-------------------------------------
diff -pruN 8.17.2-2/docs/guide/open-telemetry.asciidoc 9.2.0-1/docs/guide/open-telemetry.asciidoc
--- 8.17.2-2/docs/guide/open-telemetry.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/open-telemetry.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,75 +0,0 @@
-[[opentelemetry]]
-=== Using OpenTelemetry
-
-You can use https://opentelemetry.io/[OpenTelemetry] to monitor the performance and behavior of your {es} requests through the Elasticsearch Python client.
-The Python client comes with built-in OpenTelemetry instrumentation that emits https://www.elastic.co/guide/en/apm/guide/current/apm-distributed-tracing.html[distributed tracing spans] by default.
-With that, applications using https://www.elastic.co/blog/manual-instrumentation-of-python-applications-opentelemetry[manual OpenTelemetry instrumentation] or https://www.elastic.co/blog/auto-instrumentation-of-python-applications-opentelemetry[automatic OpenTelemetry instrumentation] are enriched with additional spans that contain insightful information about the execution of the {es} requests.
-
-The native instrumentation in the Python client follows the https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/[OpenTelemetry Semantic Conventions for {es}]. In particular, the instrumentation in the client covers the logical layer of {es} requests: a single span is created for each request that the service processes through the Python client. The following image shows a trace that records the handling of two different {es} requests: an `info` request and a `search` request.
-
-[role="screenshot"]
-image::images/otel-waterfall-without-http.png[alt="Distributed trace with Elasticsearch spans",align="center"]
-
-Usually, OpenTelemetry auto-instrumentation modules come with instrumentation support for HTTP-level communication. In this case, in addition to the logical {es} client requests, spans will be captured for the physical HTTP requests emitted by the client. The following image shows a trace with both {es} spans (in blue) and the corresponding HTTP-level spans (in red), after installing the ``opentelemetry-instrumentation-urllib3`` package:
-
-[role="screenshot"]
-image::images/otel-waterfall-with-http.png[alt="Distributed trace with Elasticsearch spans",align="center"]
-
-Advanced Python client behavior such as node round-robin and request retries is revealed through the combination of logical {es} spans and the physical HTTP spans. The following example shows a `search` request in a scenario with two nodes:
-
-[role="screenshot"]
-image::images/otel-waterfall-retry.png[alt="Distributed trace with Elasticsearch spans",align="center"]
-
-The first node is unavailable and results in an HTTP error, while the retry to the second node succeeds. Both HTTP requests are subsumed by the logical {es} request span (in blue).
-
-[discrete]
-==== Set up the OpenTelemetry instrumentation
-
-When using the https://opentelemetry.io/docs/languages/python/instrumentation/[manual Python OpenTelemetry instrumentation] or the https://opentelemetry.io/docs/languages/python/automatic/[OpenTelemetry Python agent], the Python client's OpenTelemetry instrumentation is enabled by default and uses the global OpenTelemetry SDK with the global tracer provider.
-If you're getting started with OpenTelemetry instrumentation, the following blog posts have step-by-step instructions to ingest and explore tracing data with the Elastic stack:
-
-* https://www.elastic.co/blog/manual-instrumentation-of-python-applications-opentelemetry[Manual instrumentation with OpenTelemetry for Python applications]
-* https://www.elastic.co/blog/auto-instrumentation-of-python-applications-opentelemetry[Automatic instrumentation with OpenTelemetry for Python applications]
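-
-As a minimal sketch, configuring a global tracer provider with the OpenTelemetry SDK is enough for the client's built-in instrumentation to emit spans. The console exporter and service name below are illustrative choices, not requirements of the client:
-
-[source,python]
-------------------------------------
-from opentelemetry import trace
-from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
-
-from elasticsearch import Elasticsearch
-
-# Register a global tracer provider; the client's instrumentation picks it up.
-trace.set_tracer_provider(
-    TracerProvider(resource=Resource.create({"service.name": "my-service"}))
-)
-trace.get_tracer_provider().add_span_processor(
-    BatchSpanProcessor(ConsoleSpanExporter())
-)
-
-client = Elasticsearch("http://localhost:9200")
-client.info()  # emits one span for the logical 'info' request
-------------------------------------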
-
-[discrete]
-==== Comparison with community instrumentation
-
-The https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/elasticsearch/elasticsearch.html[community OpenTelemetry Elasticsearch instrumentation] also instruments the client and sends OpenTelemetry traces, but was developed before the OpenTelemetry Semantic Conventions for {es}, so the trace attributes are inconsistent with other OpenTelemetry Elasticsearch client instrumentations. To avoid tracing the same requests twice, make sure to use only one instrumentation, either by uninstalling the opentelemetry-instrumentation-elasticsearch Python package or by <<opentelemetry-config-enable,disabling the native instrumentation>>.
-
-[discrete]
-==== Configuring the OpenTelemetry instrumentation
-
-You can configure this OpenTelemetry instrumentation through environment variables.
-The following configuration options are available.
-
-[discrete]
-[[opentelemetry-config-enable]]
-===== Enable / Disable the OpenTelemetry instrumentation
-
-With this configuration option you can enable (default) or disable the built-in OpenTelemetry instrumentation.
-
-**Default:** `true`
-
-|============
-| Environment Variable | `OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED`
-|============
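-
-For example, to disable the built-in instrumentation for a process, set the variable before starting the application:
-
-[source,bash]
-------------------------------------
-$ export OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED=false
-------------------------------------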
-
-[discrete]
-===== Capture search request bodies
-
-By default, the built-in OpenTelemetry instrumentation does not capture request bodies due to data privacy considerations. You can use this option to capture search queries from the request bodies of {es} search requests if you wish to gather this information regardless. The options are to capture the raw search query or not capture it at all.
-
-**Default:** `omit`
-
-**Valid Options:** `omit`, `raw`
-
-|============
-| Environment Variable | `OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY`
-|============
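-
-For example, to capture raw search queries in the emitted spans:
-
-[source,bash]
-------------------------------------
-$ export OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY=raw
-------------------------------------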
-
-[discrete]
-==== Overhead
-
-The OpenTelemetry instrumentation (as with any other monitoring approach) may come with a slight overhead on CPU, memory, and/or latency. The overhead may only occur when the instrumentation is enabled (default) and an OpenTelemetry SDK is active in the target application. When the instrumentation is disabled or no OpenTelemetry SDK is active within the target application, there is no expected monitoring overhead when using the client.
-
-Even in cases where the instrumentation is enabled and is actively used (by an OpenTelemetry SDK), the overhead is minimal and negligible in the vast majority of cases. In edge cases where there is a noticeable overhead, the <<opentelemetry-config-enable,instrumentation can be explicitly disabled>> to eliminate any potential impact on performance.
diff -pruN 8.17.2-2/docs/guide/overview.asciidoc 9.2.0-1/docs/guide/overview.asciidoc
--- 8.17.2-2/docs/guide/overview.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/overview.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,101 +0,0 @@
-[[overview]]
-== Overview
-
-This is the official low-level Python client for {es}. Its goal is to provide 
-common ground for all {es}-related code in Python. For this reason, the client 
-is designed to be unopinionated and extendable. An API reference is available 
-on https://elasticsearch-py.readthedocs.io[Read the Docs].
-
-
-[discrete]
-=== Compatibility
-
-Language clients are forward compatible, meaning that the clients support
-communicating with greater or equal minor versions of {es} without breaking. This
-does not mean that the clients automatically support new features of newer
-{es} versions; that is only possible after a release of a new client version. For
-example, an 8.12 client version won't automatically support the new features of
-the 8.13 version of {es}; the 8.13 client version is required for that. {es}
-language clients are only backwards compatible with default distributions, and
-without guarantees made.
-
-|===
-| Elasticsearch version | elasticsearch-py branch   | Supported
-
-| main                  | main                      | 
-| 8.x                   | 8.x                       | 8.x
-| 7.x                   | 7.x                       | 7.17
-|===
-
-If you need to have multiple versions installed at the same time, older
-versions are also released as `elasticsearch7` and `elasticsearch8`.
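-
-For example, to install a 7.x client alongside a default `elasticsearch` install (the versioned package is then typically imported under its own name, e.g. `import elasticsearch7`):
-
-[source,bash]
-------------------------------------
-$ python -m pip install elasticsearch7
-------------------------------------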
-
-
-[discrete]
-=== Example use
-
-Simple use-case:
-
-[source,python]
-------------------------------------
->>> from datetime import datetime
->>> from elasticsearch import Elasticsearch
-
-# Connect to 'http://localhost:9200'
->>> client = Elasticsearch("http://localhost:9200")
-
-# Datetimes will be serialized:
->>> client.index(index="my-index-000001", id=42, document={"any": "data", "timestamp": datetime.now()})
-{'_id': '42', '_index': 'my-index-000001', '_type': 'test-type', '_version': 1, 'ok': True}
-
-# ...but not deserialized
->>> client.get(index="my-index-000001", id=42)['_source']
-{'any': 'data', 'timestamp': '2013-05-12T19:45:31.804229'}
-------------------------------------
-
-TIP: For an elaborate example of how to ingest data into Elastic Cloud, 
-refer to {cloud}/ec-getting-started-python.html[this page].
-
-
-[discrete]
-=== Features
-
-The client's features include:
-
-* Translating basic Python data types to and from JSON
-
-* Configurable automatic discovery of cluster nodes
-
-* Persistent connections
-
-* Load balancing (with pluggable selection strategy) across all available nodes
-
-* Node timeouts on transient errors
-
-* Thread safety
-
-* Pluggable architecture
-
-The client also contains a convenient set of
-https://elasticsearch-py.readthedocs.org/en/master/helpers.html[helpers] for
-some of the more involved tasks like bulk indexing and reindexing.
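-
-For example, a minimal sketch of the `bulk` helper (the index name and documents are illustrative):
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-from elasticsearch.helpers import bulk
-
-client = Elasticsearch("http://localhost:9200")
-
-actions = (
-    {"_index": "my-index-000001", "_id": i, "any": "data", "number": i}
-    for i in range(100)
-)
-
-# 'bulk' chunks the actions and sends them in as few requests as possible,
-# returning the number of successfully indexed documents and any errors.
-successes, errors = bulk(client, actions)
-------------------------------------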
-
-
-[discrete]
-=== Elasticsearch DSL
-
-For a higher-level client library with a more limited scope, have a look at
-https://elasticsearch-dsl.readthedocs.org/[elasticsearch-dsl] - a more Pythonic library
-sitting on top of `elasticsearch-py`.
-
-It provides a more convenient and idiomatic way to write and manipulate
-https://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html[queries]. It
-stays close to the Elasticsearch JSON DSL, mirroring its terminology and
-structure while exposing the whole range of the DSL from Python, either directly
-using defined classes or queryset-like expressions.
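-
-For example, a small sketch with `elasticsearch-dsl` (index and field names are illustrative):
-
-[source,python]
-------------------------------------
-from elasticsearch import Elasticsearch
-from elasticsearch_dsl import Search
-
-client = Elasticsearch("http://localhost:9200")
-
-# Build a query with chained, Pythonic calls instead of a raw JSON body.
-s = Search(using=client, index="my-index-000001").query("match", any="data")
-
-for hit in s:
-    print(hit.meta.id, hit.any)
-------------------------------------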
-
-It also provides an optional
-https://elasticsearch-dsl.readthedocs.org/en/latest/persistence.html#doctype[persistence
-layer] for working with documents as Python objects in an ORM-like fashion:
-defining mappings, retrieving and saving documents, wrapping the document data
-in user-defined classes.
diff -pruN 8.17.2-2/docs/guide/release-notes.asciidoc 9.2.0-1/docs/guide/release-notes.asciidoc
--- 8.17.2-2/docs/guide/release-notes.asciidoc	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/guide/release-notes.asciidoc	1970-01-01 00:00:00.000000000 +0000
@@ -1,727 +0,0 @@
-[[release-notes]]
-== Release notes
-
-* <<rn-8-17-2>>
-* <<rn-8-17-1>>
-* <<rn-8-17-0>>
-* <<rn-8-16-0>>
-* <<rn-8-15-1>>
-* <<rn-8-15-0>>
-* <<rn-8-14-0>>
-* <<rn-8-13-2>>
-* <<rn-8-13-1>>
-* <<rn-8-13-0>>
-* <<rn-8-12-1>>
-* <<rn-8-12-0>>
-* <<rn-8-11-1>>
-* <<rn-8-11-0>>
-* <<rn-8-10-1>>
-* <<rn-8-10-0>>
-* <<rn-8-9-0>>
-* <<rn-8-8-1>>
-* <<rn-8-8-0>>
-* <<rn-8-7-0>>
-* <<rn-8-6-2>>
-* <<rn-8-6-1>>
-* <<rn-8-6-0>>
-* <<rn-8-5-3>>
-* <<rn-8-5-2>>
-* <<rn-8-5-1>>
-* <<rn-8-5-0>>
-* <<rn-8-4-3>>
-* <<rn-8-4-2>>
-* <<rn-8-4-1>>
-* <<rn-8-4-0>>
-* <<rn-8-3-3>>
-* <<rn-8-3-2>>
-* <<rn-8-3-1>>
-* <<rn-8-3-0>>
-* <<rn-8-2-3>>
-* <<rn-8-2-2>>
-* <<rn-8-2-1>>
-* <<rn-8-2-0>>
-* <<rn-8-1-2>>
-* <<rn-8-1-1>>
-* <<rn-8-1-0>>
-* <<rn-8-0-0>>
-
-[discrete]
-[[rn-8-17-2]]
-=== 8.17.2 (2025-03-04)
-
-- Explain how to use sub clients in API docs (https://github.com/elastic/elasticsearch-py/pull/2798[#2798])
-- Render descriptions as Markdown in API docs
-- Update APIs
-  * Add `keep_alive` to Submit async search API
-  * Add `Run and Delete` an async ES|QL query APIs
-  * Add Get async ES|QL query results API
-  * Add `include_ccs_metadata` to ES|QL query API
-
-[discrete]
-[[rn-8-17-1]]
-=== 8.17.1 (2025-01-23)
-
-- Make pyarrow dependency optional for tests (https://github.com/elastic/elasticsearch-py/pull/2733[#2733], contributed by danigm)
-- Update APIs:
-  * Add Simulate ingest API
-  * Add Get data stream lifecycle stats API
-  * Add Update inference API
-  * Add Create or update, Get and Delete IP geolocation database configuration APIs
-  * Add Bulk update API keys
-  * Add Get and Update Security index settings APIs
-  * Add OpenID Connect prepare authentication, OpenID Connect authenticate and OpenID Connect logout APIs
-  * Add Delegate PKI authentication API
-  * Add Repository analysis API
-  * Add Render Search Application Query API
-  * Add Find field structure and Find messages structure APIs
-  * Add Get Watcher index settings and Update Watcher index settings APIs
-  * Add experimental Check in and Claim connector sync job APIs
-  * Add experimental Set connector sync job errors and Set connector sync job stats APIs
-  * Add experimental Update connector features APIs
-  * Add experimental Post Event to an Analytics Collection API
-  * Add `timeout` and `master_timeout` to Snapshot lifecycle management (SLM) APIs
-  * Add `allow_partial_search_results` to SQL search API
-  * Add `throttle_period_in_millis` to Create or update watch API
-  * Fix query parameters for CAT APIs
-
-[discrete]
-[[rn-8-17-0]]
-=== 8.17.0 (2024-12-13)
-
-- Allow simsimd again on Python 3.13 (https://github.com/elastic/elasticsearch-py/pull/2722[#2722])
-- Update APIs:
-  * Mark all Inference APIs as stable.
-  * Add `allow_partial_search_results` to the Open Point in Time API
-  * Add `keep_alive` to the Get async search status API
-  * Remove the `keep_alive`, `pre_filter_shard_size` and `scroll` parameters from the Submit async search API. They were never supported.
-  * Add `master_timeout` and `timeout` to all autoscaling policy APIs
-  * Add `master_timeout` to the Alias exists and Get alias APIs
-  * Add `list_executed_pipelines` and `require_data_stream` to Bulk API
-  * Add `include_model_definition` to Get trained models API
-  * Add `meta` to Create data frame analytics API
-  * Add `aggs` to Create datafeeds API
-  * Add `allow_no_indices`, `expand_wildcards`, `ignore_throttled` and `ignore_unavailable` to Create anomaly detection jobs API
-
-[discrete]
-[[rn-8-16-0]]
-=== 8.16.0 (2024-11-12)
-
-- Support Python 3.13 (https://github.com/elastic/elasticsearch-py/pull/2689[#2689])
-- Emit Python warnings for beta and tech preview APIs (https://github.com/elastic/elasticsearch-py/pull/2685[#2685])
-- Vectorstore: use a retriever query for hybrid search (https://github.com/elastic/elasticsearch-py/pull/2682[#2682])
-- Allow retries for statuses other than 429 in streaming bulk (https://github.com/elastic/elasticsearch-py/pull/2702[#2702])
-- Make `BulkIndexError` and `ScanError` serializable (https://github.com/elastic/elasticsearch-py/pull/2700[#2700])
-- Fix import when `trace` is missing from `opentelemetry` package (https://github.com/elastic/elasticsearch-py/pull/2705[#2705])
-- Update APIs:
-  * Fix `nodes` parameter in Task management API
-  * Add Test query rule API
-  * Add Create Cross-Cluster API key and Update Cross-Cluster API key APIs
-  * Add Verify snapshot repository API
-  * Add `data_stream_name` and `settings` to Delete auto-follow pattern API
-  * Add `max_samples_per_key` to Get async EQL status API
-  * Add `lifecycle` and remove unused `data_retention` and `downsampling parameters` from Put data stream lifecycle API
-  * Add `include_remotes` and remove `flat_settings` from Cluster stats API
-  * Add `remote_indices` to Create or update application privileges and Create or update roles APIs
-
-Note that the new Python warnings can be disabled as follows:
-
-[source,python]
-----
-import warnings
-from elasticsearch.exceptions import GeneralAvailabilityWarning
-
-warnings.filterwarnings("ignore", category=GeneralAvailabilityWarning)
-----
-
-[discrete]
-[[rn-8-15-1]]
-=== 8.15.1 (2024-09-08)
-
-- Fix OTel context loss in parallel bulk helper (https://github.com/elastic/elasticsearch-py/pull/2616[#2616])
-- Use request converter to generate python examples (https://github.com/elastic/elasticsearch-py/pull/2645[#2645])
-- Add Geoip database configuration APIs: Create or update, Get and Delete
-- Add `q` parameter to Update by Query API
-- Add `allow_no_indices` and `ignore_unavailable` parameters to Resolve index API
-
-[discrete]
-[[rn-8-15-0]]
-=== 8.15.0 (2024-08-09)
-
-- Added the Connector API (https://github.com/elastic/elasticsearch-py/pull/2623[#2623])
-- Added support for https://www.elastic.co/guide/en/elasticsearch/reference/master/semantic-text.html[semantic_text] and https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-semantic-query.html[semantic query].
-- Added support for sequences of job id, model id and tags where applicable in ML APIs
-- Added `dry_run` and `force` parameters to the Perform inference API
-- Added optional Arrow deserialization support (https://github.com/elastic/elasticsearch-py/pull/2632[#2632])
-- Merged Query Ruleset API into new Query Rules API (https://github.com/elastic/elasticsearch-py/pull/2607[#2607])
-- Added mapping code examples (https://github.com/elastic/elasticsearch-py/pull/2596[#2596])
-- Fixed reference docs (https://github.com/elastic/elasticsearch-py/pull/2629[#2629])
-- Dropped Python 3.7 support (https://github.com/elastic/elasticsearch-py/pull/2618[#2618])
-
-[discrete]
-[[rn-8-14-0]]
-=== 8.14.0 (2024-06-06)
-
-- Fixed `node_pool_class` override (#2581, contributed by Tallak Hellebust)
-- Added `retriever` to the Search API
-- Added `deprecated` and removed `allow_auto_create` from the Create or update component template API
-- Added `allow_auto_create`, `cause`, `deprecated`, `ignore_missing_component_templates` and `master_timeout` to the Create or update index template API
-- Added `cause`, removed `flat_settings` and `timeout` from the Create or update index template legacy API
-- Removed various unsupported parameters from the Simulate index API
-- Added various supported parameters to the Simulate index template API
-- Added the `completion` and `rerank` task types to the Inference APIs
-- Added the `query` and `timeout` parameters to the Perform inference API
-- Added `typed_keys` to the Search Application Search API
-- Added `with_profile_uid` to the Get API key information and Query API key information APIs
-
-
-[discrete]
-[[rn-8-13-2]]
-=== 8.13.2 (2024-05-24)
-
-- Added the `ml.update_trained_model_deployment` API
-- Marked Requests 2.32.2 as incompatible with the Elasticsearch client
-
-[discrete]
-[[rn-8-13-1]]
-=== 8.13.1 (2024-05-03)
-
-- Added `force_synthetic_source` to the Get API
-- Added `wait_for_completion` to the Create trained model API
-- Added `typed_keys` to the Query API key information API
-
-[discrete]
-[[rn-8-13-0]]
-=== 8.13.0 (2024-03-22)
-
-- Added native OpenTelemetry support
-- Added optional `orjson` (a fast, correct JSON library) serialization support
-- Added the `text_structure.test_grok_pattern` API
-- Added the `indices.resolve_cluster` API
-- Renamed the `model_id` parameter to `inference_id` in the `inference` APIs
-- Changed all `synonyms` APIs from **experimental** to **stable**.
-- Fixed API key documentation
-
-[discrete]
-[[rn-8-12-1]]
-=== 8.12.1 (2024-02-22)
-
-- Fixed, but deprecated, parameter aliases in the body parameter
-- Added mappings and bulk to quickstart page
-
-[discrete]
-[[rn-8-12-0]]
-=== 8.12.0 (2024-01-19)
-
-- Dropped support for Python 3.6
-- Allowed unrestricted `body` parameter again
-- Added the Inference APIs
-- Added the ES|QL API
-- Added `active_only` parameter to `security.get_api_key` API
-- Added `expiration` parameter to `security.update_api_key` API
-
-[discrete]
-[[rn-8-11-1]]
-=== 8.11.1 (2023-12-08)
-
-- Added missing `role_templates` to `security.put_role_mapping` API
-- Added interactive examples page to documentation
-- Changed API reference to have one page per sub-client
-
-[discrete]
-[[rn-8-11-0]]
-=== 8.11.0 (2023-11-13)
-
-- Support Python 3.12
-- Added missing `scores` parameter to create trained model vocabulary API
-- Added missing `delete_dest_index` parameter to delete transform API
-
-[discrete]
-[[rn-8-10-1]]
-=== 8.10.1 (2023-10-13)
-
-- Removed deprecation warnings when using `body` parameter
-- Fixed some type hints to use covariant Sequence instead of invariant List
-
-[discrete]
-[[rn-8-10-0]]
-=== 8.10.0 (2023-09-22)
-
-- Added the Query rules APIs
-- Added the Synonyms APIs
-
-[discrete]
-[[rn-8-9-0]]
-=== 8.9.0 (2023-08-10)
-
-- Added the `cluster.info` API
-- Updated the `inference_config` argument in `ml.put_trained_model` API to reflect an improvement in the specification
-
-[discrete]
-[[rn-8-8-1]]
-=== 8.8.1 (2023-07-06)
-
-* Added the `rank` parameter to the `search` API
-
-[discrete]
-[[rn-8-8-0]]
-=== 8.8.0 (2023-05-25)
-
-* Added `include_defaults` parameter to the `cluster.get_component_template`, `indices.get_data_stream`, and `indices.get_index_template` API
-* Added the `indices.delete_data_lifecycle`, `indices.explain_data_lifecycle`, `indices.get_data_lifecycle`, and `indices.put_data_lifecycle` APIs
-* Added the **experimental** `search_application.delete`, `search_application.delete_behavioral_analytics`, `search_application.get`, `search_application.get_behavioral_analytics`, `search_application.list`, `search_application.put`, `search_application.put_behavioral_analytics`, and `search_application.search` APIs.
-
-[discrete]
-[[rn-8-7-0]]
-=== 8.7.0 (2023-04-06)
-
-* Added the `health_report` API
-* Added the `transform.schedule_now_transform` API
-* Added the `from_` request parameter to the `transform.start_transform` API
-* Added the `buffer`, `grid_agg`, and `with_labels` parameters to the `search_mvt` API
-* Added the `allow_auto_create` parameter to the `cluster.create_component_template` API
-* Added the `delete_user_annotations` parameter to the `ml.delete_job`, `ml.reset_job` API
-* Added the `start` and `end` parameters to the `ml.preview_datafeed` API
-* Added the `priority` parameter to the `ml.start_datafeed` API
-* Added the `job_id` parameter to the `ml.update_datafeed` API
-* Added the `model_prune_window` parameter to the `ml.update_job` API
-* Added the `feature_states` parameter to the `snapshot.restore_snapshot` API
-* Added the `timeout` parameter to the `transform.get_transform_stats` API
-* Added the `from_` parameter to the `transform.start_transform` API
-* Changed the `input` parameter of the `ml.put_trained_models` API from required to optional
-* Fixed the `cluster.create_component_template` API by removing the erroneously provided `aliases`, `mappings`, and `settings` parameters. Only the `template` parameter should be used for specifying component templates.
-
-[discrete]
-[[rn-8-6-2]]
-=== 8.6.2 (2023-02-16)
-
-* Client is compatible with Elasticsearch 8.6.2
-
-[discrete]
-[[rn-8-6-1]]
-=== 8.6.1 (2023-01-27)
-
-* Client is compatible with Elasticsearch 8.6.1
-
-[discrete]
-==== Core
-
-* Added the `expand_wildcards`, `preference`, and `routing` parameters to the `open_point_in_time` API.
-
-[discrete]
-[[rn-8-6-0]]
-=== 8.6.0 (2023-01-10)
-
-* Client is compatible with Elasticsearch 8.6.0
-
-[discrete]
-==== Core
-
-* Changed the `fields` parameter of the `field_caps` API to be encoded in the HTTP request body.
-* Changed the `index` parameter of the `rank_eval` API to be optional.
-* Changed the `requests` parameter of the `rank_eval` API to be optional.
-
-[discrete]
-==== CAT
-
-* Added the `time` parameter to the `cat.indices` API
-
-[discrete]
-==== Machine Learning
-
-* Fixed the `model_id` parameter of the `ml.clear_trained_model_deployment_cache` API to be required.
-
-[discrete]
-[[rn-8-5-3]]
-=== 8.5.3 (2022-12-08)
-
-* Client is compatible with Elasticsearch 8.5.3
-
-[discrete]
-[[rn-8-5-2]]
-=== 8.5.2 (2022-11-23)
-
-* Client is compatible with Elasticsearch 8.5.2
-
-[discrete]
-[[rn-8-5-1]]
-=== 8.5.1 (2022-11-21)
-
-* Client is compatible with Elasticsearch 8.5.1
-
-[discrete]
-[[rn-8-5-0]]
-=== 8.5.0 (2022-11-2)
-
-[discrete]
-==== Indices
-
-* Added the **experimental** `indices.downsample` API
-
-[discrete]
-==== Rollup
-
-* Removed the deprecated `rollup.rollup` API.
-
-[discrete]
-==== Snapshot
-
-* Added the `index_names` parameter to the `snapshot.get` API.
-
-[discrete]
-==== Machine Learning
-
-* Added the **beta** `ml.clear_trained_model_deployment_cache` API.
-* Changed the `ml.put_trained_model_definition_part` API from **experimental** to **stable**.
-* Changed the `ml.put_trained_model_vocabulary` API from **experimental** to **stable**.
-* Changed the `ml.start_trained_model_deployment` API from **experimental** to **stable**.
-* Changed the `ml.stop_trained_model_deployment` API from **experimental** to **stable**.
-
-[discrete]
-==== Security
-
-* Added the `with_limited_by` parameter to the `get_api_key` API.
-* Added the `with_limited_by` parameter to the `query_api_keys` API.
-* Added the `with_profile_uid` parameter to the `get_user` API.
-* Changed the `security.activate_user_profile` API from **beta** to **stable**.
-* Changed the `security.disable_user_profile` API from **beta** to **stable**.
-* Changed the `security.enable_user_profile` API from **beta** to **stable**.
-* Changed the `security.get_user_profile` API from **beta** to **stable**.
-* Changed the `security.suggest_user_profiles` API from **beta** to **stable**.
-* Changed the `security.update_user_profile_data` API from **beta** to **stable**.
-* Changed the `security.has_privileges_user_profile` API from **experimental** to **stable**.
-
-[discrete]
-[[rn-8-4-3]]
-=== 8.4.3 (2022-10-06)
-
-* Client is compatible with Elasticsearch 8.4.3
-
-[discrete]
-[[rn-8-4-2]]
-=== 8.4.2 (2022-09-20)
-
-[discrete]
-==== Documents
-
-* Added the `error_trace`, `filter_path`, `human` and `pretty` parameters to the `get_source` API.
-* Added the `ext` parameter to the `search` API.
-
-[discrete]
-==== Async Search
-
-* Added the `ext` parameter to the `async_search.submit` API.
-
-[discrete]
-==== Fleet
-
-* Added the `ext` parameter to the `fleet.search` API.
-
-[discrete]
-[[rn-8-4-1]]
-=== 8.4.1 (2022-09-06)
-
-* Client is compatible with Elasticsearch 8.4.1
-
-[discrete]
-[[rn-8-4-0]]
-=== 8.4.0 (2022-08-25)
-
-[discrete]
-==== Search
-
-* Added the `knn` parameter to the `search` API.
-* Added the `knn` parameter to the `async_search.submit` API.
-
-[discrete]
-==== Machine Learning
-
-* Added the `cache_size` parameter to the `ml.start_trained_model_deployment` API.
-
-[discrete]
-==== Security
-
-* Added the `security.update_api_key` API.
-
-[discrete]
-[[rn-8-3-3]]
-=== 8.3.3 (2022-08-01)
-
-* Client is compatible with Elasticsearch 8.3.3
-
-[discrete]
-[[rn-8-3-2]]
-=== 8.3.2 (2022-08-01)
-
-[discrete]
-==== Security
-
-* Added the `refresh` parameter to the `security.create_service_token` API.
-
-[discrete]
-[[rn-8-3-1]]
-=== 8.3.1 (2022-06-30)
-
-[discrete]
-==== Security
-
-* Added the **experimental** `security.has_privileges_user_profile` API.
-* Added the `hint` parameter to the **experimental** `security.suggest_user_profiles` API.
-
-[discrete]
-[[rn-8-3-0]]
-=== 8.3.0 (2022-06-29)
-
-* Client is compatible with Elasticsearch 8.3.0
-
-[discrete]
-[[rn-8-2-3]]
-=== 8.2.3 (2022-06-15)
-
-[discrete]
-==== Documents
-
-* Added the `routing` parameter to the `msearch` API.
-
-[discrete]
-==== CAT
-
-* Added the `cat.component_templates` API.
-
-[discrete]
-==== Ingest
-
-* Added the `if_version` parameter to the `ingest.put_pipeline` API.
-
-[discrete]
-==== Security
-
-* Changed the `name` parameter for the `security.create_service_token` API from required to optional.
-* Added the `refresh` parameter to the `security.create_service_token` API.
-* Changed the name of `access` parameter to the `labels` parameter in the `security.update_user_profile_data` API.
-
-[discrete]
-==== Shutdown
-
-* Added the `timeout` and `master_timeout` parameters to the `shutdown.get_node`, `shutdown.delete_node`, and `shutdown.put_node` APIs.
-* Added the `reason`, `type`, `allocation_delay`, and `target_node_name` parameters to the `shutdown.put_node` API.
-
-[discrete]
-[[rn-8-2-2]]
-=== 8.2.2 (2022-06-01)
-
-* Client is compatible with Elasticsearch 8.2.2
-
-[discrete]
-[[rn-8-2-1]]
-=== 8.2.1 (2022-06-01)
-
-[discrete]
-==== Machine Learning
-
-* Added the `inference_config` parameter to the `ml.infer_trained_model_deployment` API
-
-[discrete]
-[[rn-8-2-0]]
-=== 8.2.0 (2022-05-03)
-
-[discrete]
-==== Client
-
-* Re-introduced support for passing `requests.auth.BaseAuth` objects to the `http_auth` parameter which was available in 7.x.
-
-[discrete]
-==== Search
-
-* Added the `filter` parameter to the **experimental** `knn_search` API
-
-[discrete]
-==== Documents
-
-* Changed the `source` and `dest` parameters for the `reindex` API from optional to required
-
-[discrete]
-==== Indices
-
-* Added the `indices.field_usage_stats` API
-* Added the `indices.modify_data_stream` API
-* Added the `fields` and `types` parameters to the `field_caps` API
-* Added the `ignore_unavailable` parameter to the `open_point_in_time` API
-* Added the `master_timeout` and `timeout` parameters to the `indices.delete` API
-* Added the `features` parameter to the `indices.get` API
-
-[discrete]
-==== Machine Learning
-
-* Added the `ml.get_memory_stats` API
-
-[discrete]
-==== Migrations
-
-* Added the `migrations.get_feature_upgrade_status` API
-* Added the `migrations.post_feature_upgrade` API
-
-[discrete]
-==== Nodes
-
-* Added the `nodes.clear_repositories_metering_archive` API
-* Added the `nodes.get_repositories_metering_info` API
-
-[discrete]
-==== Security
-
-* Added the **beta** `security.activate_user_profile` API
-* Added the **beta** `security.disable_user_profile` API
-* Added the **beta** `security.enable_user_profile` API
-* Added the **beta** `security.get_user_profile` API
-* Added the **beta** `security.suggest_user_profiles` API
-* Added the **beta** `security.update_user_profile_data` API
-
-[discrete]
-==== SQL
-
-* Added the `catalog`, `index_using_frozen`, `keep_alive`, `keep_on_completion`, `runtime_mappings`, and `wait_for_completion_timeout` parameters to the `sql.query` API
-
-[discrete]
-[[rn-8-1-2]]
-=== 8.1.2 (2022-03-30)
-
-* Client is compatible with Elasticsearch 8.1.2
-
-
-[discrete]
-[[rn-8-1-1]]
-=== 8.1.1 (2022-03-22)
-
-[discrete]
-==== Documents
-
-* Changed the `source` and `dest` parameters of the `reindex` API to be required.
-
-[discrete]
-==== Mappings
-
-* Changed the `fields` parameter of the `field_caps` API to be required.
-
-
-[discrete]
-[[rn-8-1-0]]
-=== 8.1.0 (2022-03-08)
-
-[discrete]
-==== Transforms
-
-* Added the `transform.reset_transform` API
-
-
-[discrete]
-[[rn-8-0-0]]
-=== 8.0.0 (2022-02-10)
-
-[discrete]
-==== Added
-
-* Added the top-level `.options()` method to `Elasticsearch` and `AsyncElasticsearch` for modifying transport options.
-* Added parameters corresponding to JSON request body fields for all APIs
-* Added `basic_auth` parameter for specifying username and password authentication
-* Added `bearer_auth` parameter for specifying an HTTP bearer token or service token
-* Added the `meta` property to `ApiError` to access the HTTP response metadata of an error.
-* Added a check that a compatible version of the `elastic-transport` package is installed.
-
-[discrete]
-==== Changed
-
-* Changed the transport layer to use the `elastic-transport` package
-* Changed user-defined `body` parameters to have semantic names (e.g `index(document={...})` instead of `index(body={...})`).
-* Changed responses to be objects with two properties, `meta` for response metadata (HTTP status, headers, node, etc) and `body` for a typed body.
-* Changed `AsyncElasticsearch` to always be available, regardless of whether `aiohttp` is installed
-* Changed exception hierarchy, the major change is a new exception `ApiError` which differentiates between an error that's raised from the transport layer (previously `elasticsearch.exceptions.TransportError`, now `elastic_transport.TransportError`) and one raised from the API layer
-* Changed the name of `JSONSerializer` to `JsonSerializer` for consistency with other serializer names. Added an alias to the old name for backwards compatibility
-* Changed the default mimetypes (`application/json`) to instead use compatibility mimetypes (`application/vnd.elasticsearch+json`) which always request for responses compatibility with version 8.x.
-
-[discrete]
-==== Removed
-
-* Removed support for Python 2.7 and Python 3.5, the library now supports only Python 3.6+
-* Removed the `elasticsearch.connection` module as all functionality has been moved to the `elastic-transport` package
-* Removed the default URL of `http://localhost:9200` due to Elasticsearch 8.0 default configuration being `https://localhost:9200`.
-  The client's connection to Elasticsearch now must be specified with scheme, host, and port or with the `cloud_id` parameter
-* Removed the ability to use positional arguments with API methods. Going forward all API parameters must be keyword-only parameters
-* Removed the `doc_type`, `include_type_name`, and `copy_settings` parameters from many document and index APIs
-
-[discrete]
-==== Deprecated
-
-* Deprecated the `body` and `params` parameters on all APIs
-* Deprecated setting transport options `http_auth`, `api_key`, `ignore`, `request_timeout`, `headers`, and `opaque_id`
-  All of these settings should instead be set via the `.options()` method
-* Deprecated the `elasticsearch.transport` and `elasticsearch.client` modules. These modules will be removed in a future version
-
-[discrete]
-==== CAT
-
-* Removed the deprecated `local` parameter from the `cat.indices`, `cat.nodes`, `cat.shards` API
-* Removed the deprecated `allow_no_datafeeds` parameter from the `cat.ml_datafeeds` API
-* Removed the deprecated `allow_no_jobs` parameter from the `cat.ml_jobs` API
-* Removed the deprecated `size` parameter from the `cat.thread_pool` API
-* Added the `time` parameter to the `cat.thread_pool` API
-
-[discrete]
-==== Documents
-
-* Removed the deprecated `size` parameter from the `delete_by_query` API
-* Removed the deprecated `size` parameter from the `update_by_query` API
-
-[discrete]
-==== Indices
-
-* Removed the deprecated `indices.flush_synced` API
-* Removed the deprecated `indices.freeze` API
-* Removed the deprecated `indices.get_upgrade` API
-* Removed the deprecated `indices.upgrade` API
-* Removed the deprecated `indices.exist_type` API
-* Removed the deprecated parameter `copy_settings` from the `indices.shrink` API
-* Deprecated the `verbose` parameter of the `indices.segments` API
-
-[discrete]
-==== License / X-Pack
-
-* Deprecated the `accept_enterprise` parameter of the `license.get` API
-* Deprecated the `accept_enterprise` parameter of the `xpack.info` API
-
-[discrete]
-==== Machine Learning
-
-* Added the **experimental** `ml.infer_trained_model_deployment` API
-* Added the **experimental** `ml.put_trained_model_definition_part` API
-* Added the **experimental** `ml.put_trained_model_vocabulary` API
-* Added the **experimental** `ml.start_trained_model_deployment` API
-* Added the **experimental** `ml.stop_trained_model_deployment` API
-* Added the `timeout` parameter to the `ml.delete_trained_model` API
-* Removed the deprecated `allow_no_jobs` parameter from the `ml.close_job` API
-* Removed the deprecated `ml.find_text_structure` API
-* Removed the deprecated `allow_no_datafeeds` parameter from the `ml.get_datafeed_stats` API
-* Removed the deprecated `allow_no_datafeeds` parameter from the `ml.get_datafeeds` API
-* Removed the deprecated `allow_no_jobs` parameter from the `ml.get_job_stats` API
-* Removed the deprecated `allow_no_jobs` parameter from the `ml.get_jobs` API
-* Removed the deprecated `allow_no_jobs` parameter from the `ml.get_overall_buckets` API
-
-[discrete]
-==== Search
-
-* Added the **experimental** `knn_search` API
-
-[discrete]
-==== Searchable Snapshots
-
-* Removed the deprecated `searchable_snapshots.repository_stats` API
-
-[discrete]
-==== Snapshots
-
-* Changed the `snapshot.delete` API to accept multiple snapshots
-
-[discrete]
-==== Security
-
-* Added the `security.enroll_kibana` API
-* Added the `security.enroll_node` API
diff -pruN 8.17.2-2/docs/images/logo-elastic-glyph-color.svg 9.2.0-1/docs/images/logo-elastic-glyph-color.svg
--- 8.17.2-2/docs/images/logo-elastic-glyph-color.svg	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/images/logo-elastic-glyph-color.svg	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+<svg width="205" height="204" viewBox="0 0 205 204" fill="none" xmlns="http://www.w3.org/2000/svg">
+<g clip-path="url(#clip0_1178_2)">
+<path d="M204.58 106.744C204.603 98.4365 202.056 90.3256 197.289 83.5226C192.521 76.7196 185.766 71.5575 177.95 68.7437C178.661 65.1202 179.02 61.4363 179.02 57.7437C179.015 45.5282 175.137 33.6288 167.945 23.7553C160.752 13.8817 150.615 6.54212 138.99 2.79108C127.365 -0.95996 114.849 -0.929399 103.242 2.87837C91.6356 6.68615 81.5344 14.0751 74.3903 23.9837C69.1179 19.9113 62.6636 17.6651 56.0021 17.5844C49.3406 17.5036 42.8337 19.5926 37.4641 23.536C32.0946 27.4793 28.1539 33.0628 26.2374 39.4431C24.3208 45.8235 24.5325 52.6542 26.8403 58.9037C19.0148 61.7531 12.2486 66.929 7.45072 73.7362C2.6528 80.5433 0.0529206 88.6558 0.000313645 96.9837C-0.0326102 105.33 2.52727 113.48 7.32627 120.309C12.1253 127.138 18.9265 132.307 26.7903 135.104C25.1677 143.453 25.4123 152.057 27.5064 160.301C29.6005 168.544 33.4924 176.222 38.903 182.784C44.3136 189.347 51.1089 194.631 58.8019 198.258C66.495 201.885 74.8951 203.765 83.4003 203.764C92.5559 203.772 101.581 201.59 109.722 197.402C117.863 193.213 124.884 187.138 130.2 179.684C135.455 183.802 141.912 186.091 148.588 186.201C155.264 186.312 161.793 184.238 167.181 180.295C172.569 176.353 176.522 170.758 178.437 164.362C180.352 157.965 180.125 151.119 177.79 144.864C185.623 142.013 192.394 136.832 197.193 130.016C201.992 123.201 204.587 115.079 204.63 106.744" fill="white"/>
+<path d="M80.4304 87.7437L125.2 108.154L170.36 68.5837C172.647 57.1747 170.923 45.326 165.48 35.0418C160.036 24.7576 151.208 16.6692 140.487 12.1447C129.767 7.62016 117.813 6.9373 106.647 10.2116C95.4817 13.4859 85.7895 20.5163 79.2104 30.1137L71.6904 69.1137L80.4304 87.7437Z" fill="#FEC514"/>
+<path d="M34.1005 135.154C31.7687 146.616 33.4787 158.533 38.9397 168.877C44.4007 179.221 53.2757 187.355 64.0559 191.896C74.836 196.436 86.856 197.103 98.0722 193.783C109.288 190.463 119.009 183.36 125.581 173.684L133.031 134.844L123.031 115.844L78.1405 95.3438L34.1005 135.154Z" fill="#02BCB7"/>
+<path d="M33.7903 57.6838L64.4903 64.9238L71.2103 30.0437C67.0362 26.8839 61.9516 25.1598 56.7165 25.129C51.4814 25.0981 46.3769 26.7623 42.1659 29.8727C37.9549 32.9831 34.8636 37.3728 33.3539 42.3856C31.8442 47.3984 31.9973 52.7652 33.7903 57.6838Z" fill="#F04E98"/>
+<path d="M31.1505 64.9837C24.5185 67.1748 18.727 71.3643 14.5705 76.9775C10.4141 82.5907 8.09631 89.3525 7.93527 96.3353C7.77423 103.318 9.7778 110.179 13.6711 115.978C17.5643 121.777 23.1566 126.229 29.6805 128.724L72.6805 89.8137L64.7905 72.9837L31.1505 64.9837Z" fill="#1BA9F5"/>
+<path d="M133.44 173.684C137.012 176.435 141.284 178.128 145.77 178.572C150.256 179.016 154.777 178.191 158.818 176.193C162.859 174.195 166.259 171.103 168.63 167.269C171.001 163.434 172.248 159.012 172.23 154.504C172.243 151.636 171.749 148.789 170.77 146.094L140.12 138.924L133.44 173.684Z" fill="#9ADC30"/>
+<path d="M139.68 130.894L173.43 138.784C180.166 136.513 186.025 132.197 190.191 126.437C194.357 120.678 196.622 113.762 196.67 106.654C196.664 99.8008 194.573 93.112 190.676 87.4751C186.779 81.8383 181.259 77.5201 174.85 75.0938L130.72 113.764L139.68 130.894Z" fill="#0B64DD"/>
+</g>
+<defs>
+<clipPath id="clip0_1178_2">
+<rect width="205" height="204" fill="white"/>
+</clipPath>
+</defs>
+</svg>
diff -pruN 8.17.2-2/docs/logo-elastic-glyph-color.svg 9.2.0-1/docs/logo-elastic-glyph-color.svg
--- 8.17.2-2/docs/logo-elastic-glyph-color.svg	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/logo-elastic-glyph-color.svg	1970-01-01 00:00:00.000000000 +0000
@@ -1,16 +0,0 @@
-<svg width="205" height="204" viewBox="0 0 205 204" fill="none" xmlns="http://www.w3.org/2000/svg">
-<g clip-path="url(#clip0_1178_2)">
-<path d="M204.58 106.744C204.603 98.4365 202.056 90.3256 197.289 83.5226C192.521 76.7196 185.766 71.5575 177.95 68.7437C178.661 65.1202 179.02 61.4363 179.02 57.7437C179.015 45.5282 175.137 33.6288 167.945 23.7553C160.752 13.8817 150.615 6.54212 138.99 2.79108C127.365 -0.95996 114.849 -0.929399 103.242 2.87837C91.6356 6.68615 81.5344 14.0751 74.3903 23.9837C69.1179 19.9113 62.6636 17.6651 56.0021 17.5844C49.3406 17.5036 42.8337 19.5926 37.4641 23.536C32.0946 27.4793 28.1539 33.0628 26.2374 39.4431C24.3208 45.8235 24.5325 52.6542 26.8403 58.9037C19.0148 61.7531 12.2486 66.929 7.45072 73.7362C2.6528 80.5433 0.0529206 88.6558 0.000313645 96.9837C-0.0326102 105.33 2.52727 113.48 7.32627 120.309C12.1253 127.138 18.9265 132.307 26.7903 135.104C25.1677 143.453 25.4123 152.057 27.5064 160.301C29.6005 168.544 33.4924 176.222 38.903 182.784C44.3136 189.347 51.1089 194.631 58.8019 198.258C66.495 201.885 74.8951 203.765 83.4003 203.764C92.5559 203.772 101.581 201.59 109.722 197.402C117.863 193.213 124.884 187.138 130.2 179.684C135.455 183.802 141.912 186.091 148.588 186.201C155.264 186.312 161.793 184.238 167.181 180.295C172.569 176.353 176.522 170.758 178.437 164.362C180.352 157.965 180.125 151.119 177.79 144.864C185.623 142.013 192.394 136.832 197.193 130.016C201.992 123.201 204.587 115.079 204.63 106.744" fill="white"/>
-<path d="M80.4304 87.7437L125.2 108.154L170.36 68.5837C172.647 57.1747 170.923 45.326 165.48 35.0418C160.036 24.7576 151.208 16.6692 140.487 12.1447C129.767 7.62016 117.813 6.9373 106.647 10.2116C95.4817 13.4859 85.7895 20.5163 79.2104 30.1137L71.6904 69.1137L80.4304 87.7437Z" fill="#FEC514"/>
-<path d="M34.1005 135.154C31.7687 146.616 33.4787 158.533 38.9397 168.877C44.4007 179.221 53.2757 187.355 64.0559 191.895C74.836 196.436 86.856 197.103 98.0722 193.783C109.288 190.463 119.009 183.36 125.581 173.684L133.031 134.844L123.031 115.844L78.1405 95.3437L34.1005 135.154Z" fill="#00BFB3"/>
-<path d="M33.7903 57.6837L64.4903 64.9237L71.2103 30.0437C67.0362 26.8839 61.9516 25.1598 56.7165 25.1289C51.4814 25.0981 46.3769 26.7623 42.1659 29.8727C37.9549 32.9831 34.8636 37.3728 33.3539 42.3856C31.8442 47.3984 31.9973 52.7651 33.7903 57.6837Z" fill="#F04E98"/>
-<path d="M31.1505 64.9837C24.5185 67.1748 18.727 71.3643 14.5705 76.9775C10.4141 82.5907 8.09631 89.3525 7.93527 96.3353C7.77423 103.318 9.7778 110.179 13.6711 115.978C17.5643 121.777 23.1566 126.229 29.6805 128.724L72.6805 89.8137L64.7905 72.9837L31.1505 64.9837Z" fill="#1BA9F5"/>
-<path d="M133.44 173.684C137.012 176.435 141.284 178.128 145.77 178.572C150.256 179.016 154.777 178.191 158.818 176.193C162.859 174.195 166.259 171.103 168.63 167.269C171.001 163.434 172.248 159.012 172.23 154.504C172.243 151.636 171.749 148.789 170.77 146.094L140.12 138.924L133.44 173.684Z" fill="#93C90E"/>
-<path d="M139.68 130.894L173.43 138.784C180.166 136.513 186.025 132.197 190.191 126.437C194.357 120.678 196.622 113.762 196.67 106.654C196.664 99.8008 194.573 93.1119 190.676 87.4751C186.779 81.8382 181.259 77.52 174.85 75.0937L130.72 113.764L139.68 130.894Z" fill="#0077CC"/>
-</g>
-<defs>
-<clipPath id="clip0_1178_2">
-<rect width="205" height="204" fill="white"/>
-</clipPath>
-</defs>
-</svg>
diff -pruN 8.17.2-2/docs/reference/async.md 9.2.0-1/docs/reference/async.md
--- 8.17.2-2/docs/reference/async.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/async.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,128 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/async.html
+---
+
+# Using with asyncio [async]
+
+The `elasticsearch` package supports async/await with [asyncio](https://docs.python.org/3/library/asyncio.html) and [aiohttp](https://docs.aiohttp.org). You can either install `aiohttp` directly or use the `[async]` extra:
+
+```bash
+$ python -m pip install elasticsearch aiohttp
+
+# - OR -
+
+$ python -m pip install elasticsearch[async]
+```
+
+
+## Getting Started with Async [_getting_started_with_async]
+
+After installation all async API endpoints are available via `AsyncElasticsearch` and are used in the same way as other APIs, with an extra `await`:
+
+```python
+import asyncio
+from elasticsearch import AsyncElasticsearch
+
+client = AsyncElasticsearch()
+
+async def main():
+    resp = await client.search(
+        index="documents",
+        body={"query": {"match_all": {}}},
+        size=20,
+    )
+    print(resp)
+
+asyncio.run(main())
+```
+
+All APIs that are available under the sync client are also available under the async client.
+
+[Reference documentation](https://elasticsearch-py.readthedocs.io/en/latest/async.html#api-reference)
+
+
+## ASGI Applications and Elastic APM [_asgi_applications_and_elastic_apm]
+
+[ASGI](https://asgi.readthedocs.io) (Asynchronous Server Gateway Interface) is a way to serve Python web applications making use of async I/O to achieve better performance. Some examples of ASGI frameworks include FastAPI, Django 3.0+, and Starlette. If you’re using one of these frameworks along with Elasticsearch then you should be using `AsyncElasticsearch` to avoid blocking the event loop with synchronous network calls and to get optimal performance.
+
+[Elastic APM](apm-agent-python://reference/index.md) also supports tracing of async Elasticsearch queries just the same as synchronous queries. For an example on how to configure `AsyncElasticsearch` with a popular ASGI framework [FastAPI](https://fastapi.tiangolo.com/) and APM tracing there is a [pre-built example](https://github.com/elastic/elasticsearch-py/tree/master/examples/fastapi-apm) in the `examples/fastapi-apm` directory.
+
+See also the [Using OpenTelemetry](/reference/opentelemetry.md) page.
+
+## Trio support
+
+If you prefer using Trio instead of asyncio to take advantage of its better structured concurrency support, you can use the HTTPX async node which supports Trio out of the box.
+
+```python
+import trio
+from elasticsearch import AsyncElasticsearch
+
+client = AsyncElasticsearch(
+    "https://...",
+    api_key="...",
+    node_class="httpxasync")
+
+async def main():
+    resp = await client.info()
+    print(resp.body)
+
+trio.run(main)
+```
+
+The one limitation of Trio support is that it does not currently support node sniffing, which was not implemented with structured concurrency in mind.
+
+## Frequently Asked Questions [_frequently_asked_questions]
+
+
+### ValueError when initializing `AsyncElasticsearch`? [_valueerror_when_initializing_asyncelasticsearch]
+
+If you receive `ValueError: You must have 'aiohttp' installed to use AiohttpHttpNode` when trying to use `AsyncElasticsearch`, ensure that you have `aiohttp` installed in your environment (check with `$ python -m pip freeze | grep aiohttp`). Otherwise, async support won’t be available.
+
+
+### What about the `elasticsearch-async` package? [_what_about_the_elasticsearch_async_package]
+
+Previously asyncio was supported separately via the [elasticsearch-async](https://github.com/elastic/elasticsearch-py-async) package. The `elasticsearch-async` package has been deprecated in favor of `AsyncElasticsearch` provided by the `elasticsearch` package in v7.8 and onwards.
+
+
+### Receiving *Unclosed client session / connector* warning? [_receiving_unclosed_client_session_connector_warning]
+
+This warning is created by `aiohttp` when an open HTTP connection is garbage collected. You’ll typically run into this when closing your application. To resolve the issue ensure that `AsyncElasticsearch.close()` is called before the `AsyncElasticsearch` instance is garbage collected.
+
+For example if using FastAPI that might look like this:
+
+```python
+import os
+from contextlib import asynccontextmanager
+
+from fastapi import FastAPI
+from elasticsearch import AsyncElasticsearch
+
+ELASTICSEARCH_URL = os.environ["ELASTICSEARCH_URL"]
+client = None
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    global client
+    client = AsyncElasticsearch(ELASTICSEARCH_URL)
+    yield
+    await client.close()
+
+app = FastAPI(lifespan=lifespan)
+
+@app.get("/")
+async def main():
+    return await client.info()
+```
+
+You can run this example by saving it to `main.py` and executing `ELASTICSEARCH_URL=http://localhost:9200 uvicorn main:app`.
+
+
+## Async Helpers [_async_helpers]
+
+Async variants of all helpers are available in `elasticsearch.helpers` and are all prefixed with `async_*`. You’ll notice that these APIs are identical to the ones in the sync [*Client helpers*](/reference/client-helpers.md) documentation.
+
+All async helpers that accept an iterator or generator also accept async iterators and async generators.
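+
+For example, the `async_bulk` helper accepts an async generator of actions; a minimal sketch (the index name and documents are illustrative):
+
+```python
+import asyncio
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.helpers import async_bulk
+
+client = AsyncElasticsearch()
+
+async def generate_actions():
+    # Yield one action per document to index
+    for word in ("foo", "bar", "baz"):
+        yield {"_index": "words", "word": word}
+
+async def main():
+    await async_bulk(client, generate_actions())
+    await client.close()
+
+asyncio.run(main())
+```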
+
+[Reference documentation](https://elasticsearch-py.readthedocs.io/en/latest/async.html#api-reference)
+
diff -pruN 8.17.2-2/docs/reference/client-helpers.md 9.2.0-1/docs/reference/client-helpers.md
--- 8.17.2-2/docs/reference/client-helpers.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/client-helpers.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,72 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/client-helpers.html
+---
+
+# Client helpers [client-helpers]
+
+Here you can find a collection of simple helper functions that abstract some of the specifics of the raw API.
+
+
+## Bulk helpers [bulk-helpers]
+
+There are several helpers for the bulk API, since its requirements for specific formatting and other considerations can make it cumbersome to use directly.
+
+All bulk helpers accept an instance of the `Elasticsearch` class and an iterable of `actions` (any iterable; it can also be a generator, which is ideal in most cases since it allows you to index large datasets without loading them into memory).
+
+The items in the `actions` iterable should be the documents you wish to index, in one of several formats. The most common format is the same as returned by `search()`, for example:
+
+```yaml
+{
+  '_index': 'index-name',
+  '_id': 42,
+  '_routing': 5,
+  'pipeline': 'my-ingest-pipeline',
+  '_source': {
+    "title": "Hello World!",
+    "body": "..."
+  }
+}
+```
+
+Alternatively, if `_source` is not present, the helper pops all metadata fields from the doc and uses the rest as the document data:
+
+```yaml
+{
+  "_id": 42,
+  "_routing": 5,
+  "title": "Hello World!",
+  "body": "..."
+}
+```
+
+The `bulk()` API accepts `index`, `create`, `delete`, and `update` actions. Use the `_op_type` field to specify an action (`_op_type` defaults to `index`):
+
+```yaml
+{
+  '_op_type': 'delete',
+  '_index': 'index-name',
+  '_id': 42,
+}
+{
+  '_op_type': 'update',
+  '_index': 'index-name',
+  '_id': 42,
+  'doc': {'question': 'The life, universe and everything.'}
+}
+```
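+
+For example, a minimal sketch that passes a generator of actions to the `bulk()` helper (the index name and documents are illustrative):
+
+```python
+from elasticsearch import Elasticsearch
+from elasticsearch.helpers import bulk
+
+client = Elasticsearch("https://localhost:9200", api_key="...")
+
+def generate_actions():
+    for i in range(100):
+        yield {
+            "_index": "index-name",
+            "_id": i,
+            "title": f"Hello World {i}!",
+        }
+
+# Returns a tuple of (number of successful actions, list of errors)
+successes, errors = bulk(client, generate_actions())
+```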
+
+
+## Scan [scan]
+
+A simple abstraction on top of the `scroll()` API: an iterator that yields all hits as returned by the underlying scroll requests.
+
+By default scan does not return results in any pre-determined order. To have a standard order in the returned documents (either by score or explicit sort definition) when scrolling, use `preserve_order=True`. This may be an expensive operation and will negate the performance benefits of using `scan`.
+
+```py
+from elasticsearch import Elasticsearch
+from elasticsearch.helpers import scan
+
+es = Elasticsearch("https://localhost:9200", api_key="...")
+for hit in scan(es,
+    query={"query": {"match": {"title": "python"}}},
+    index="orders-*"
+):
+    print(hit["_source"])
+```
+
diff -pruN 8.17.2-2/docs/reference/configuration.md 9.2.0-1/docs/reference/configuration.md
--- 8.17.2-2/docs/reference/configuration.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/configuration.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,416 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/config.html
+navigation_title: Configuration
+---
+
+# Python client configuration for {{es}} [config]
+
+This page contains information about the most important configuration options of the Python {{es}} client.
+
+
+## TLS/SSL [tls-and-ssl]
+
+The options in this section can only be used when the node is configured for HTTPS. An error will be raised if using these options with an HTTP node.
+
+
+### Verifying server certificates [_verifying_server_certificates]
+
+The typical route to verify a cluster certificate is via a "CA bundle" which can be specified via the `ca_certs` parameter. If no options are given and the [certifi package](https://github.com/certifi/python-certifi) is installed then certifi’s CA bundle is used by default.
+
+If you have your own CA bundle to use you can configure via the `ca_certs` parameter:
+
+```python
+client = Elasticsearch(
+    "https://...",
+    ca_certs="/path/to/certs.pem"
+)
+```
+
+If you are using a generated certificate or a certificate with a known fingerprint, you can use the `ssl_assert_fingerprint` parameter to specify the fingerprint, which is matched against the server’s leaf certificate during the TLS handshake. If any certificate matches, the connection is verified; otherwise a `TlsError` is raised.
+
+In Python 3.9 and earlier only the leaf certificate will be verified but in Python 3.10+ private APIs are used to verify any certificate in the certificate chain. This helps when using certificates that are generated on a multi-node cluster.
+
+```python
+client = Elasticsearch(
+    "https://...",
+    ssl_assert_fingerprint=(
+        "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3"
+    )
+)
+```
+
+To disable certificate verification use the `verify_certs=False` parameter. This option should be avoided in production; instead, use the other options to verify the cluster’s certificate.
+
+```python
+client = Elasticsearch(
+    "https://...",
+    verify_certs=False
+)
+```
+
+
+### TLS versions [_tls_versions]
+
+The minimum TLS version used when connecting is configured via the `ssl_version` parameter. By default this is set to a minimum of TLSv1.2. Use the `ssl.TLSVersion` enumeration to specify versions.
+
+```python
+import ssl
+
+client = Elasticsearch(
+    ...,
+    ssl_version=ssl.TLSVersion.TLSv1_2
+)
+```
+
+
+### Client TLS certificate authentication [_client_tls_certificate_authentication]
+
+Elasticsearch can be configured to authenticate clients via TLS client certificates. Client certificate and keys can be configured via the `client_cert` and `client_key` parameters:
+
+```python
+client = Elasticsearch(
+    ...,
+    client_cert="/path/to/cert.pem",
+    client_key="/path/to/key.pem",
+)
+```
+
+
+### Using an SSLContext [_using_an_sslcontext]
+
+For advanced users an `ssl.SSLContext` object can be used for configuring TLS via the `ssl_context` parameter. The `ssl_context` parameter can’t be combined with any other TLS options except for the `ssl_assert_fingerprint` parameter.
+
+```python
+import ssl
+
+# Create and configure an SSLContext
+ctx = ssl.create_default_context()
+ctx.load_verify_locations(...)
+
+client = Elasticsearch(
+    ...,
+    ssl_context=ctx
+)
+```
+
+
+## HTTP compression [compression]
+
+Compression of HTTP request and response bodies can be enabled with the `http_compress` parameter. If enabled, HTTP request bodies will be compressed with `gzip` and requests will include the `Accept-Encoding: gzip` HTTP header so that responses can also be compressed. By default compression is disabled.
+
+```python
+client = Elasticsearch(
+    ...,
+    http_compress=True  # Enable compression!
+)
+```
+
+Enabling HTTP compression is recommended when requests travel over a network between the client and the cluster. Compression is automatically enabled when connecting to Elastic Cloud.
+
+
+## Request timeouts [timeouts]
+
+Requests can be configured to timeout if taking too long to be serviced. The `request_timeout` parameter can be passed via the client constructor or the client `.options()` method. When the request times out the node will raise a `ConnectionTimeout` exception which can trigger retries.
+
+Setting `request_timeout` to `None` will disable timeouts.
+
+```python
+client = Elasticsearch(
+    ...,
+    request_timeout=10  # 10 second timeout
+)
+
+# Search request will timeout in 5 seconds
+client.options(request_timeout=5).search(...)
+```
+
+
+### API and server timeouts [_api_and_server_timeouts]
+
+There are also API-level timeouts to take into consideration when making requests, which can cause the request to time out on the server side rather than the client side. You may need to configure both a transport-level and an API-level timeout for long-running operations.
+
+In the example below there are three different configurable timeouts for the `cluster.health` API all with different meanings for the request:
+
+```python
+client.options(
+    # Amount of time to wait for an HTTP response to start.
+    request_timeout=30
+).cluster.health(
+    # Amount of time to wait to collect info on all nodes.
+    timeout=30,
+    # Amount of time to wait for info from the master node.
+    master_timeout=10,
+)
+```
+
+
+## Retries [retries]
+
+Requests can be retried if they don’t return with a successful response. This provides a way for requests to be resilient against transient failures or overloaded nodes.
+
+The maximum number of retries per request can be configured via the `max_retries` parameter. Setting this parameter to 0 disables retries. This parameter can be set in the client constructor or per-request via the client `.options()` method:
+
+```python
+client = Elasticsearch(
+    ...,
+    max_retries=5
+)
+
+# For this API request we disable retries with 'max_retries=0'
+client.options(max_retries=0).index(
+    index="blogs",
+    document={
+        "title": "..."
+    }
+)
+```
+
+
+### Retrying on connection errors and timeouts [_retrying_on_connection_errors_and_timeouts]
+
+Connection errors are automatically retried if retries are enabled. Retrying requests on connection timeouts can be enabled or disabled via the `retry_on_timeout` parameter. This parameter can be set on the client constructor or via the client `.options()` method:
+
+```python
+client = Elasticsearch(
+    ...,
+    retry_on_timeout=True
+)
+client.options(retry_on_timeout=False).info()
+```
+
+
+### Retrying status codes [_retrying_status_codes]
+
+By default if retries are enabled `retry_on_status` is set to `(429, 502, 503, 504)`. This parameter can be set on the client constructor or via the client `.options()` method. Setting this value to `()` will disable the default behavior.
+
+```python
+client = Elasticsearch(
+    ...,
+    retry_on_status=()
+)
+
+# Retry this API on '500 Internal Error' statuses
+client.options(retry_on_status=[500]).index(
+    index="blogs",
+    document={
+        "title": "..."
+    }
+)
+```
+
+
+### Ignoring status codes [_ignoring_status_codes]
+
+By default an `ApiError` exception will be raised for any non-2XX HTTP response once retries, if any, have been exhausted. If you’re expecting an HTTP error from the API but aren’t interested in raising an exception you can use the `ignore_status` parameter via the client `.options()` method.
+
+A good example where this is useful is setting up or cleaning up resources in a cluster in a robust way:
+
+```python
+client = Elasticsearch(...)
+
+# API request is robust against the index not existing:
+resp = client.options(ignore_status=404).indices.delete(index="delete-this")
+resp.meta.status  # Can be either '2XX' or '404'
+
+# API request is robust against the index already existing:
+resp = client.options(ignore_status=[400]).indices.create(
+    index="create-this",
+    mapping={
+        "properties": {"field": {"type": "integer"}}
+    }
+)
+resp.meta.status  # Can be either '2XX' or '400'
+```
+
+When using the `ignore_status` parameter the error response will be returned serialized just like a non-error response. In these cases it can be useful to inspect the HTTP status of the response. To do this you can inspect the `resp.meta.status`.
+
+
+## Sniffing for new nodes [sniffing]
+
+Additional nodes can be discovered by a process called "sniffing" where the client will query the cluster for more nodes that can handle requests.
+
+Sniffing can happen at three different times: on client instantiation, before requests, and on a node failure. These three behaviors can be enabled and disabled with the `sniff_on_start`, `sniff_before_requests`, and `sniff_on_node_failure` parameters.
+
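+For example, a minimal sketch enabling all three behaviors (the host is illustrative):
+
+```python
+client = Elasticsearch(
+    "https://localhost:9200",
+    sniff_on_start=True,          # sniff when the client is created
+    sniff_before_requests=True,   # sniff before requests as needed
+    sniff_on_node_failure=True,   # sniff after a node failure
+)
+```
+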
+::::{important}
+When using an HTTP load balancer or proxy you cannot use the sniffing functionality, as the cluster would supply the client with IP addresses for connecting to nodes directly, circumventing the load balancer. Depending on your configuration this might be something you don’t want, or it might break completely.
+::::
+
+
+
+### Waiting between sniffing attempts [_waiting_between_sniffing_attempts]
+
+To avoid needlessly sniffing too often there is a delay between attempts to discover new nodes. This value can be controlled via the `min_delay_between_sniffing` parameter.
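+
+A minimal sketch (the 60-second delay is illustrative):
+
+```python
+client = Elasticsearch(
+    ...,
+    sniff_before_requests=True,
+    min_delay_between_sniffing=60,  # wait at least 60 seconds between sniff attempts
+)
+```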
+
+
+### Filtering nodes which are sniffed [_filtering_nodes_which_are_sniffed]
+
+By default, nodes which have only the `master` role will not be used. To change this behavior, use the `sniffed_node_callback` parameter. To exclude a sniffed node from the node pool, return `None` from the `sniffed_node_callback`; otherwise return a `NodeConfig` instance.
+
+```python
+from typing import Optional, Dict, Any
+from elastic_transport import NodeConfig
+from elasticsearch import Elasticsearch
+
+def filter_master_eligible_nodes(
+    node_info: Dict[str, Any],
+    node_config: NodeConfig
+) -> Optional[NodeConfig]:
+    # This callback ignores all nodes that are master eligible
+    # instead of master-only nodes (default behavior)
+    if "master" in node_info.get("roles", ()):
+        return None
+    return node_config
+
+client = Elasticsearch(
+    "https://localhost:9200",
+    sniffed_node_callback=filter_master_eligible_nodes
+)
+```
+
+The `node_info` parameter is part of the response from the `nodes.info()` API; below is an example of what that object looks like:
+
+```json
+{
+  "name": "SRZpKFZ",
+  "transport_address": "127.0.0.1:9300",
+  "host": "127.0.0.1",
+  "ip": "127.0.0.1",
+  "version": "5.0.0",
+  "build_hash": "253032b",
+  "roles": ["master", "data", "ingest"],
+  "http": {
+    "bound_address": ["[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200"],
+    "publish_address": "1.1.1.1:123",
+    "max_content_length_in_bytes": 104857600
+  }
+}
+```
+
+
+## Node Pool [node-pool]
+
+
+### Selecting a node from the pool [_selecting_a_node_from_the_pool]
+
+You can specify a node selector pattern via the `node_selector_class` parameter. The supported values are `round_robin` and `random`. Default is `round_robin`.
+
+```python
+client = Elasticsearch(
+    ...,
+    node_selector_class="round_robin"
+)
+```
+
+Custom selectors are also supported:
+
+```python
+from elastic_transport import NodeSelector
+
+class CustomSelector(NodeSelector):
+    def select(self, nodes): ...
+
+client = Elasticsearch(
+    ...,
+    node_selector_class=CustomSelector
+)
+```
+
+
+### Marking nodes dead and alive [_marking_nodes_dead_and_alive]
+
+Individual nodes of Elasticsearch may have transient connectivity or load issues which may make them unable to service requests. To combat this the pool of nodes will detect when a node isn’t able to service requests due to transport or API errors.
+
+After its timeout has elapsed a node is moved back into the set of "alive" nodes, but its count of consecutive errors is only reset once the node returns a successful response.
+
+The `dead_node_backoff_factor` and `max_dead_node_backoff` parameters can be used to configure how long the node pool will put the node into timeout with each consecutive failure. Both parameters use a unit of seconds.
+
+The calculation is equal to `min(dead_node_backoff_factor * (2 ** (consecutive_failures - 1)), max_dead_node_backoff)`.
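+
+A minimal sketch of configuring the backoff (the values are illustrative):
+
+```python
+client = Elasticsearch(
+    ...,
+    dead_node_backoff_factor=1.0,  # timeouts of 1s, 2s, 4s, ... per consecutive failure
+    max_dead_node_backoff=30.0,    # but never longer than 30 seconds
+)
+```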
+
+
+## Serializers [serializer]
+
+Serializers transform bytes on the wire into native Python objects and vice-versa. By default the client ships with serializers for `application/json`, `application/x-ndjson`, `text/*`, `application/vnd.apache.arrow.stream` and `application/mapbox-vector-tile`.
+
+You can define custom serializers via the `serializers` parameter:
+
+```python
+from typing import Any
+
+from elasticsearch import Elasticsearch, JsonSerializer
+
+class JsonSetSerializer(JsonSerializer):
+    """Custom JSON serializer that handles Python sets"""
+    def default(self, data: Any) -> Any:
+        if isinstance(data, set):
+            return list(data)
+        return super().default(data)
+
+client = Elasticsearch(
+    ...,
+    # Serializers are a mapping of 'mimetype' to Serializer class.
+    serializers={"application/json": JsonSetSerializer()}
+)
+```
+
+If the `orjson` package is installed, you can use the faster `OrjsonSerializer` for the default mimetype (`application/json`):
+
+```python
+from elasticsearch import Elasticsearch, OrjsonSerializer
+
+es = Elasticsearch(
+    ...,
+    serializer=OrjsonSerializer()
+)
+```
+
+orjson is particularly fast when serializing vectors as it has native numpy support. This will be the default in a future release. Note that you can install orjson with the `orjson` extra:
+
+```sh
+$ python -m pip install elasticsearch[orjson]
+```
+
+
+## Nodes [nodes]
+
+
+### Node implementations [_node_implementations]
+
+The default node class for synchronous I/O is `urllib3` and the default node class for asynchronous I/O is `aiohttp`.
+
+All of the built-in HTTP node implementations like `urllib3`, `requests`, and `aiohttp` can be selected by passing a simple string to the `node_class` parameter:
+
+```python
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch(
+    ...,
+    node_class="requests"
+)
+```
+
+You can also specify a custom node implementation via the `node_class` parameter:
+
+```python
+from elasticsearch import Elasticsearch
+from elastic_transport import Urllib3HttpNode
+
+class CustomHttpNode(Urllib3HttpNode):
+    ...
+
+client = Elasticsearch(
+    ...,
+    node_class=CustomHttpNode
+)
+```
+
+
+### HTTP connections per node [_http_connections_per_node]
+
+Each node contains its own pool of HTTP connections to allow for concurrent requests. This value is configurable via the `connections_per_node` parameter:
+
+```python
+client = Elasticsearch(
+    ...,
+    connections_per_node=5
+)
+```
+
diff -pruN 8.17.2-2/docs/reference/connecting.md 9.2.0-1/docs/reference/connecting.md
--- 8.17.2-2/docs/reference/connecting.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/connecting.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,350 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/connecting.html
+---
+
+# Connecting [connecting]
+
+This page contains the information you need to connect the Client with {{es}}.
+
+
+## Connecting to Elastic Cloud [connect-ec]
+
+[Elastic Cloud](docs-content://deploy-manage/deploy/elastic-cloud/cloud-hosted.md) is the easiest way to get started with {{es}}. When connecting to Elastic Cloud with the Python {{es}} client you should always use the `cloud_id` parameter to connect. You can find this value within the "Manage Deployment" page after you’ve created a cluster (look in the top-left if you’re in Kibana).
+
+We recommend using a Cloud ID whenever possible because your client will be automatically configured for optimal use with Elastic Cloud including HTTPS and HTTP compression.
+
+```python
+from elasticsearch import Elasticsearch
+
+# Password for the 'elastic' user generated by Elasticsearch
+ELASTIC_PASSWORD = "<password>"
+
+# Found in the 'Manage Deployment' page
+CLOUD_ID = "deployment-name:dXMtZWFzdDQuZ2Nw..."
+
+# Create the client instance
+client = Elasticsearch(
+    cloud_id=CLOUD_ID,
+    basic_auth=("elastic", ELASTIC_PASSWORD)
+)
+
+# Successful response!
+client.info()
+# {'name': 'instance-0000000000', 'cluster_name': ...}
+```
+
+
+## Connecting to a self-managed cluster [connect-self-managed-new]
+
+By default {{es}} will start with security features like authentication and TLS enabled. To connect to the {{es}} cluster you’ll need to configure the Python {{es}} client to use HTTPS with the generated CA certificate in order to make requests successfully.
+
+If you’re just getting started with {{es}} we recommend reading the documentation on [configuring](docs-content://deploy-manage/deploy/self-managed/configure-elasticsearch.md) and [starting {{es}}](docs-content://deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md) to ensure your cluster is running as expected.
+
+When you start {{es}} for the first time you’ll see a distinct block like the one below in the output from {{es}} (you may have to scroll up if it’s been a while):
+
+```sh
+----------------------------------------------------------------
+-> Elasticsearch security features have been automatically configured!
+-> Authentication is enabled and cluster connections are encrypted.
+
+->  Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`):
+  lhQpLELkjkrawaBoaz0Q
+
+->  HTTP CA certificate SHA-256 fingerprint:
+  a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228
+...
+----------------------------------------------------------------
+```
+
+Note down the `elastic` user password and HTTP CA fingerprint for the next sections. In the examples below they will be stored in the variables `ELASTIC_PASSWORD` and `CERT_FINGERPRINT` respectively.
+
+Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint.
+
+
+### Verifying HTTPS with CA certificates [_verifying_https_with_ca_certificates]
+
+Using the `ca_certs` option is the default way the Python {{es}} client verifies an HTTPS connection.
+
+The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md).
+
+Once you have the `http_ca.crt` file somewhere accessible pass the path to the client via `ca_certs`:
+
+```python
+from elasticsearch import Elasticsearch
+
+# Password for the 'elastic' user generated by Elasticsearch
+ELASTIC_PASSWORD = "<password>"
+
+# Create the client instance
+client = Elasticsearch(
+    "https://localhost:9200",
+    ca_certs="/path/to/http_ca.crt",
+    basic_auth=("elastic", ELASTIC_PASSWORD)
+)
+
+# Successful response!
+client.info()
+# {'name': 'instance-0000000000', 'cluster_name': ...}
+```
+
+::::{note}
+If you don’t specify `ca_certs` or `ssl_assert_fingerprint` then the [certifi package](https://certifiio.readthedocs.io) will be used for `ca_certs` by default if available.
+::::
+
+
+
+### Verifying HTTPS with certificate fingerprints (Python 3.10 or later) [_verifying_https_with_certificate_fingerprints_python_3_10_or_later]
+
+::::{note}
+Using this method **requires using Python 3.10 or later** and isn’t available when using the `aiohttp` HTTP client library, so it can’t be used with `AsyncElasticsearch`.
+::::
+
+
+This method of verifying the HTTPS connection takes advantage of the certificate fingerprint value noted down earlier. Take this SHA256 fingerprint value and pass it to the Python {{es}} client via `ssl_assert_fingerprint`:
+
+```python
+from elasticsearch import Elasticsearch
+
+# Fingerprint either from Elasticsearch startup or above script.
+# Colons and uppercase/lowercase don't matter when using
+# the 'ssl_assert_fingerprint' parameter
+CERT_FINGERPRINT = "A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28"
+
+# Password for the 'elastic' user generated by Elasticsearch
+ELASTIC_PASSWORD = "<password>"
+
+client = Elasticsearch(
+    "https://localhost:9200",
+    ssl_assert_fingerprint=CERT_FINGERPRINT,
+    basic_auth=("elastic", ELASTIC_PASSWORD)
+)
+
+# Successful response!
+client.info()
+# {'name': 'instance-0000000000', 'cluster_name': ...}
+```
+
+The certificate fingerprint can be calculated using `openssl x509` with the certificate file:
+
+```sh
+openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt
+```
+
+If you don’t have access to the generated CA file from {{es}} you can use the following script to output the root CA fingerprint of the {{es}} instance with `openssl s_client`:
+
+```sh
+# Replace the values of 'localhost' and '9200' to the
+# corresponding host and port values for the cluster.
+openssl s_client -connect localhost:9200 -servername localhost -showcerts </dev/null 2>/dev/null \
+  | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin
+```
+
+The output of `openssl x509` will look something like this:
+
+```sh
+SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28
+```
+
+
+## Connecting without security enabled [connect-no-security]
+
+::::{warning}
+Running {{es}} without security enabled is not recommended.
+::::
+
+
+If your cluster is configured with [security explicitly disabled](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP:
+
+```python
+from elasticsearch import Elasticsearch
+
+# Create the client instance
+client = Elasticsearch("http://localhost:9200")
+
+# Successful response!
+client.info()
+# {'name': 'instance-0000000000', 'cluster_name': ...}
+```
+
+
+## Connecting to multiple nodes [connect-url]
+
+The Python {{es}} client supports sending API requests to multiple nodes in the cluster. This means that work will be more evenly spread across the cluster instead of hammering the same node over and over with requests. To configure the client with multiple nodes you can pass a list of URLs; each URL will be used as a separate node in the pool.
+
+```python
+from elasticsearch import Elasticsearch
+
+# List of nodes to connect to, with different hosts and ports.
+NODES = [
+    "https://localhost:9200",
+    "https://localhost:9201",
+    "https://localhost:9202",
+]
+
+# Password for the 'elastic' user generated by Elasticsearch
+ELASTIC_PASSWORD = "<password>"
+
+client = Elasticsearch(
+    NODES,
+    ca_certs="/path/to/http_ca.crt",
+    basic_auth=("elastic", ELASTIC_PASSWORD)
+)
+```
+
+By default nodes are selected using round-robin, but alternate node selection strategies can be configured with the `node_selector_class` parameter, as in the sketch below.
+
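+A minimal sketch selecting nodes at random, reusing the `NODES` list and credentials from above:
+
+```python
+client = Elasticsearch(
+    NODES,
+    ca_certs="/path/to/http_ca.crt",
+    basic_auth=("elastic", ELASTIC_PASSWORD),
+    node_selector_class="random",
+)
+```
+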
+::::{note}
+If your {{es}} cluster is behind a load balancer, as is the case when using Elastic Cloud, you won’t need to configure multiple nodes. Instead use the load balancer host and port.
+::::
+
+
+
+## Authentication [authentication]
+
+This section contains code snippets to show you how to connect to various {{es}} providers. All authentication methods are supported on the client constructor or via the per-request `.options()` method:
+
+```python
+from elasticsearch import Elasticsearch
+
+# Authenticate from the constructor
+client = Elasticsearch(
+    "https://localhost:9200",
+    ca_certs="/path/to/http_ca.crt",
+    basic_auth=("username", "password")
+)
+
+# Authenticate via the .options() method:
+client.options(
+    basic_auth=("username", "password")
+).indices.get(index="*")
+
+# You can persist the authenticated client to use
+# later or use for multiple API calls:
+auth_client = client.options(api_key="api_key")
+for i in range(10):
+    auth_client.index(
+        index="example-index",
+        document={"field": i}
+    )
+```
+
+
+### HTTP Basic authentication (Username and Password) [auth-basic]
+
+HTTP Basic authentication uses the `basic_auth` parameter by passing in a username and password within a tuple:
+
+```python
+from elasticsearch import Elasticsearch
+
+# Adds the HTTP header 'Authorization: Basic <base64 username:password>'
+client = Elasticsearch(
+    "https://localhost:9200",
+    ca_certs="/path/to/http_ca.crt",
+    basic_auth=("username", "password")
+)
+```
+
+
+### HTTP Bearer authentication [auth-bearer]
+
+HTTP Bearer authentication uses the `bearer_auth` parameter by passing the token as a string. This authentication method is used by [Service Account Tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token) and [Bearer Tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token).
+
+```python
+from elasticsearch import Elasticsearch
+
+# Adds the HTTP header 'Authorization: Bearer token-value'
+client = Elasticsearch(
+    "https://localhost:9200",
+    bearer_auth="token-value"
+)
+```
+
+
+### API Key authentication [auth-apikey]
+
+You can configure the client to use {{es}}'s API Key for connecting to your cluster. These can be generated through the [Elasticsearch Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) or [Kibana Stack Management](docs-content://deploy-manage/api-keys/elasticsearch-api-keys.md#create-api-key).
+
+```python
+from elasticsearch import Elasticsearch
+
+# Adds the HTTP header 'Authorization: ApiKey <base64 api_key.id:api_key.api_key>'
+client = Elasticsearch(
+    "https://localhost:9200",
+    ca_certs="/path/to/http_ca.crt",
+    api_key="api_key",
+)
+```
+
+
+## Using the Client in a Function-as-a-Service Environment [connecting-faas]
+
+This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment.
+
+The most influential optimization is to initialize the client outside of the function, in the global scope.
+
+This practice not only improves performance but also enables background functionality such as [sniffing](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). The following examples provide a skeleton for the best practices.
+
+::::{important}
+The async client shouldn’t be used within Function-as-a-Service as a new event loop must be started for each invocation. Instead the synchronous `Elasticsearch` client is recommended.
+::::
+
+
+
+### GCP Cloud Functions [connecting-faas-gcp]
+
+```python
+from elasticsearch import Elasticsearch
+
+# Client initialization
+client = Elasticsearch(
+    cloud_id="deployment-name:ABCD...",
+    api_key=...
+)
+
+def main(request):
+    # Use the client
+    client.search(index=..., query={"match_all": {}})
+```
+
+
+### AWS Lambda [connecting-faas-aws]
+
+```python
+from elasticsearch import Elasticsearch
+
+# Client initialization
+client = Elasticsearch(
+    cloud_id="deployment-name:ABCD...",
+    api_key=...
+)
+
+def main(event, context):
+    # Use the client
+    client.search(index=..., query={"match_all": {}})
+```
+
+
+### Azure Functions [connecting-faas-azure]
+
+```python
+import azure.functions as func
+from elasticsearch import Elasticsearch
+
+# Client initialization
+client = Elasticsearch(
+    cloud_id="deployment-name:ABCD...",
+    api_key=...
+)
+
+def main(request: func.HttpRequest) -> func.HttpResponse:
+    # Use the client
+    client.search(index=..., query={"match_all": {}})
+```
+
+Resources used to assess these recommendations:
+
+* [GCP Cloud Functions: Tips & Tricks](https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations)
+* [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html)
+* [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables)
+* [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html)
diff -pruN 8.17.2-2/docs/reference/dsl_configuration.md 9.2.0-1/docs/reference/dsl_configuration.md
--- 8.17.2-2/docs/reference/dsl_configuration.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/dsl_configuration.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,106 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_configuration.html
+navigation_title: Configuration
+---
+
+# Python DSL configuration for {{es}} [_configuration]
+
+There are several ways to configure connections for the library. The easiest and most useful approach is to define one default connection that can be used every time an API call is made without explicitly passing in other connections.
+
+::::{note}
+Unless you want to access multiple clusters from your application, it is highly recommended that you use the `create_connection` method; all operations will then use that connection automatically.
+
+::::
+
+
+## Default connection [_default_connection]
+
+To define a default connection that can be used globally, use the `connections` module and the `create_connection` method like this:
+
+```python
+from elasticsearch.dsl import connections
+
+connections.create_connection(hosts=['https://localhost:9200'], timeout=20)
+```
+
+### Single connection with an alias [_single_connection_with_an_alias]
+
+You can define the `alias` or name of a connection so you can easily refer to it later. The default value for `alias` is `default`.
+
+```python
+from elasticsearch.dsl import connections
+
+connections.create_connection(alias='my_new_connection', hosts=['https://localhost:9200'], timeout=60)
+```
+
+Additional keyword arguments (`hosts` and `timeout` in our example) will be passed to the `Elasticsearch` class from `elasticsearch-py`.
+
+To see all possible configuration options refer to the [documentation](https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html).
+
+
+
+## Multiple clusters [_multiple_clusters]
+
+You can define multiple connections to multiple clusters at the same time using the `configure` method:
+
+```python
+from elasticsearch.dsl import connections
+
+connections.configure(
+    default={'hosts': 'https://localhost:9200'},
+    dev={
+        'hosts': ['https://esdev1.example.com:9200'],
+        'sniff_on_start': True
+    }
+)
+```
+
+Such connections will be constructed lazily when requested for the first time.
+
+You can alternatively define multiple connections by adding them one by one as shown in the following example:
+
+```python
+# if you have configuration options to be passed to Elasticsearch.__init__
+# this also shows creating a connection with the alias 'qa'
+connections.create_connection('qa', hosts=['https://esqa1.example.com:9200'], sniff_on_start=True)
+
+# if you already have an Elasticsearch instance ready
+connections.add_connection('another_qa', my_client)
+```
+
+### Using aliases [_using_aliases]
+
+When using multiple connections, you can refer to them using the string alias specified when you created the connection.
+
+This example shows how to use an alias to a connection:
+
+```python
+s = Search(using='qa')
+```
+
+A `KeyError` will be raised if there is no connection registered with that alias.
+
+
+
+## Manual [_manual]
+
+If you don’t want to supply a global configuration, you can always pass in your own connection as an instance of `elasticsearch.Elasticsearch` with the parameter `using` wherever it is accepted like this:
+
+```python
+s = Search(using=Elasticsearch('https://localhost:9200'))
+```
+
+You can even use this approach to override any connection the object might already be associated with:
+
+```python
+s = s.using(Elasticsearch('https://otherhost:9200'))
+```
+
+::::{note}
+When using the `dsl` module, it is highly recommended that you use the built-in serializer (`elasticsearch.dsl.serializer.serializer`) to ensure your objects are correctly serialized into `JSON` every time. The `create_connection` method that is described here (and that the `configure` method uses under the hood) will do that automatically for you, unless you explicitly specify your own serializer. The built-in serializer also allows you to serialize your own objects - just define a `to_dict()` method on your objects and that method will be automatically called when serializing your custom objects to `JSON`.
+
+::::
+
+
+
diff -pruN 8.17.2-2/docs/reference/dsl_examples.md 9.2.0-1/docs/reference/dsl_examples.md
--- 8.17.2-2/docs/reference/dsl_examples.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/dsl_examples.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,10 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_examples.html
+navigation_title: Examples
+---
+
+# {{es}} Python DSL examples [_examples]
+
+Please see the [DSL examples](https://github.com/elastic/elasticsearch-py/tree/master/examples/dsl) directory to see some complex examples using the DSL module.
+
diff -pruN 8.17.2-2/docs/reference/dsl_how_to_guides.md 9.2.0-1/docs/reference/dsl_how_to_guides.md
--- 8.17.2-2/docs/reference/dsl_how_to_guides.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/dsl_how_to_guides.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1755 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_how_to_guides.html
+---
+
+# How-To Guides [_how_to_guides]
+
+## Search DSL [search_dsl]
+
+### The `Search` object [_the_search_object]
+
+The `Search` object represents the entire search request:
+
+* queries
+* filters
+* aggregations
+* k-nearest neighbor searches
+* sort
+* pagination
+* highlighting
+* suggestions
+* collapsing
+* additional parameters
+* associated client
+
+The API is designed to be chainable. With the exception of the aggregations functionality, this means that the `Search` object is immutable: all changes to the object will result in a shallow copy being created which contains the changes. You can safely pass the `Search` object to foreign code without fear of it modifying your objects as long as it sticks to the `Search` object APIs.
+
+You can pass an instance of the [elasticsearch client](https://elasticsearch-py.readthedocs.io/) when instantiating the `Search` object:
+
+```python
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import Search
+
+client = Elasticsearch()
+
+s = Search(using=client)
+```
+
+You can also define the client at a later time (for more options see the `configuration` chapter):
+
+```python
+s = s.using(client)
+```
+
+::::{note}
+All methods return a *copy* of the object, making it safe to pass to outside code.
+
+::::
+
+
+The API is chainable, allowing you to combine multiple method calls in one statement:
+
+```python
+s = Search().using(client).query(Match("title", "python"))
+```
+
+To send the request to Elasticsearch:
+
+```python
+response = s.execute()
+```
+
+If you just want to iterate over the hits returned by your search you can iterate over the `Search` object:
+
+```python
+for hit in s:
+    print(hit.title)
+```
+
+Search results will be cached. Subsequent calls to `execute` or trying to iterate over an already executed `Search` object will not trigger additional requests being sent to Elasticsearch. To force a new request to be issued specify `ignore_cache=True` when calling `execute`.
+
+For debugging purposes you can serialize the `Search` object to a `dict` with the raw Elasticsearch request:
+
+```python
+print(s.to_dict())
+```
+
+#### Delete By Query [_delete_by_query]
+
+You can delete the documents matching a search by calling `delete` on the `Search` object instead of `execute` like this:
+
+```python
+s = Search(index='i').query(Match("title", "python"))
+response = s.delete()
+```
+
+To pass [deletion parameters](https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query) in your query, you can add them by calling `params` on the `Search` object before `delete`, like this:
+
+```python
+s = Search(index='i').query("match", title="python")
+s = s.params(ignore_unavailable=False, wait_for_completion=True)
+response = s.delete()
+```
+
+
+#### Queries [_queries]
+
+The `elasticsearch.dsl.query` module provides classes for all Elasticsearch query types. These classes accept keyword arguments in their constructors, which are serialized to the appropriate format to be sent to Elasticsearch. There is a clear one-to-one mapping between the raw query and its equivalent class-based version:
+
+```python
+>>> from elasticsearch.dsl.query import MultiMatch, Match
+
+>>> q = MultiMatch(query='python django', fields=['title', 'body'])
+>>> q.to_dict()
+{'multi_match': {'query': 'python django', 'fields': ['title', 'body']}}
+
+>>> q = Match("title", {"query": "web framework", "type": "phrase"})
+>>> q.to_dict()
+{'match': {'title': {'query': 'web framework', 'type': 'phrase'}}}
+```
+
+An alternative to the class-based queries is to use the `Q` shortcut, passing a query name followed by its parameters, or the raw query as a `dict`:
+
+```python
+from elasticsearch.dsl import Q
+
+Q("multi_match", query='python django', fields=['title', 'body'])
+Q({"multi_match": {"query": "python django", "fields": ["title", "body"]}})
+```
+
+To add a query to the `Search` object, use the `.query()` method. This works with class-based or `Q` queries:
+
+```python
+q = Q("multi_match", query='python django', fields=['title', 'body'])
+s = s.query(q)
+```
+
+As a shortcut the `query()` method also accepts all the parameters of the `Q` shortcut directly:
+
+```python
+s = s.query("multi_match", query='python django', fields=['title', 'body'])
+```
+
+If you already have a query object, or a `dict` representing one, you can assign it to the `query` attribute of a `Search` object, replacing any previously configured queries:
+
+```python
+s.query = Q('bool', must=[Q('match', title='python'), Q('match', body='best')])
+```
+
+
+#### Dotted fields [_dotted_fields]
+
+Sometimes you want to refer to a field within another field, either as a multi-field (`title.keyword`) or in a structured `json` document like `address.city`. This is not a problem when using class-based queries, but when working without classes it is often required to pass field names as keyword arguments. To make this easier, you can use `__` (double underscore) in place of a dot in a keyword argument:
+
+```python
+s = Search()
+s = s.filter('term', category__keyword='Python')
+s = s.query('match', address__city='prague')
+```
+
+Alternatively you can use Python’s keyword argument unpacking:
+
+```python
+s = Search()
+s = s.filter('term', **{'category.keyword': 'Python'})
+s = s.query('match', **{'address.city': 'prague'})
+```
+
+
+#### Query combination [_query_combination]
+
+Query objects can be combined using logical operators `|`, `&` and `~`:
+
+```python
+>>> q = Match("title", "python") | Match("title", "django")
+>>> q.to_dict()
+{'bool': {'should': [{'match': {'title': 'python'}}, {'match': {'title': 'django'}}]}}
+
+>>> q = Match("title", "python") & Match("title", "django")
+>>> q.to_dict()
+{'bool': {'must': [{'match': {'title': 'python'}}, {'match': {'title': 'django'}}]}}
+
+>>> q = ~Match("title", "python")
+>>> q.to_dict()
+{'bool': {'must_not': [{'match': {'title': 'python'}}]}}
+```
+
+When you call the `.query()` method multiple times, the `&` operator will be used internally to combine all the queries:
+
+```python
+s = s.query().query()
+print(s.to_dict())
+# {"query": {"bool": {...}}}
+```
+
+If you want to have precise control over the query form, use the `Q` shortcut to directly construct the combined query:
+
+```python
+q = Q('bool',
+    must=[Q('match', title='python')],
+    should=[Q(...), Q(...)],
+    minimum_should_match=1
+)
+s = Search().query(q)
+```
+
+
+#### Filters [_filters]
+
+If you want to add a query in a [filter context](docs-content://explore-analyze/query-filter/languages/querydsl.md) you can use the `filter()` method to make things easier:
+
+```python
+from elasticsearch.dsl.query import Terms
+
+s = Search()
+s = s.filter(Terms("tags", ['search', 'python']))
+```
+
+Behind the scenes this will produce a `Bool` query and place the specified `terms` query into its `filter` branch, making it equivalent to:
+
+```python
+from elasticsearch.dsl.query import Terms, Bool
+
+s = Search()
+s = s.query(Bool(filter=[Terms("tags", ["search", "python"])]))
+```
+
+If you want to use the `post_filter` element for faceted navigation, use the `.post_filter()` method.
+
+The `exclude()` method works like `filter()`, but it applies the query as negated:
+
+```python
+s = Search()
+s = s.exclude(Terms("tags", ['search', 'python']))
+```
+
+which is shorthand for:
+
+```python
+s = s.query(Bool(filter=[~Terms("tags", ["search", "python"])]))
+```
+
+
+#### Aggregations [_aggregations]
+
+As with queries, there are classes that represent each aggregation type, all accessible through the `elasticsearch.dsl.aggs` module:
+
+```python
+from elasticsearch.dsl import aggs
+
+a = aggs.Terms(field="tags")
+# {"terms": {"field": "tags"}}
+```
+
+It is also possible to define an aggregation using the `A` shortcut:
+
+```python
+from elasticsearch.dsl import A
+
+A('terms', field='tags')
+```
+
+To nest aggregations, you can use the `.bucket()`, `.metric()` and `.pipeline()` methods:
+
+```python
+a = aggs.Terms(field="category")
+# {'terms': {'field': 'category'}}
+
+a.metric("clicks_per_category", aggs.Sum(field="clicks")) \
+    .bucket("tags_per_category", aggs.Terms(field="tags"))
+# {
+#   'terms': {'field': 'category'},
+#   'aggs': {
+#     'clicks_per_category': {'sum': {'field': 'clicks'}},
+#     'tags_per_category': {'terms': {'field': 'tags'}}
+#   }
+# }
+```
+
+To add aggregations to the `Search` object, use the `.aggs` property, which acts as a top-level aggregation:
+
+```python
+s = Search()
+a = aggs.Terms(field="category")
+s.aggs.bucket("category_terms", a)
+# {
+#   'aggs': {
+#     'category_terms': {
+#       'terms': {
+#         'field': 'category'
+#       }
+#     }
+#   }
+# }
+```
+
+or
+
+```python
+s = Search()
+s.aggs.bucket("articles_per_day", aggs.DateHistogram(field="publish_date", interval="day")) \
+    .metric("clicks_per_day", aggs.Sum(field="clicks")) \
+    .pipeline("moving_click_average", aggs.MovingAvg(buckets_path="clicks_per_day")) \
+    .bucket("tags_per_day", aggs.Terms(field="tags"))
+
+s.to_dict()
+# {
+#   "aggs": {
+#     "articles_per_day": {
+#       "date_histogram": { "interval": "day", "field": "publish_date" },
+#       "aggs": {
+#         "clicks_per_day": { "sum": { "field": "clicks" } },
+#         "moving_click_average": { "moving_avg": { "buckets_path": "clicks_per_day" } },
+#         "tags_per_day": { "terms": { "field": "tags" } }
+#       }
+#     }
+#   }
+# }
+```
+
+You can access an existing bucket by its name:
+
+```python
+s = Search()
+
+s.aggs.bucket("per_category", aggs.Terms(field="category"))
+s.aggs["per_category"].metric("clicks_per_category", aggs.Sum(field="clicks"))
+s.aggs["per_category"].bucket("tags_per_category", aggs.Terms(field="tags"))
+```
+
+::::{note}
+When chaining multiple aggregations, there is a difference between what `.bucket()` and `.metric()` methods return - `.bucket()` returns the newly defined bucket while `.metric()` returns its parent bucket to allow further chaining.
+
+::::
+
+
+As opposed to other methods on the `Search` object, aggregations are defined in-place, without returning a new copy.
+
+
+#### K-Nearest Neighbor Searches [_k_nearest_neighbor_searches]
+
+To issue a kNN search, use the `.knn()` method:
+
+```python
+s = Search()
+vector = get_embedding("search text")
+
+s = s.knn(
+    field="embedding",
+    k=5,
+    num_candidates=10,
+    query_vector=vector
+)
+```
+
+The `field`, `k` and `num_candidates` arguments can be given as positional or keyword arguments and are required. In addition to these, `query_vector` or `query_vector_builder` must be given as well.
+
+The `.knn()` method can be invoked multiple times to include multiple kNN searches in the request.
+
+
+#### Sorting [_sorting]
+
+To specify sorting order, use the `.sort()` method:
+
+```python
+s = Search().sort(
+    'category',
+    '-title',
+    {"lines" : {"order" : "asc", "mode" : "avg"}}
+)
+```
+
+It accepts positional arguments which can be either strings or dictionaries. A string value is a field name, optionally prefixed by the `-` sign to specify descending order.
+
+To reset the sorting, just call the method with no arguments:
+
+```python
+s = s.sort()
+```
+
+
+#### Pagination [_pagination]
+
+To specify the from/size parameters, apply the standard Python slicing operator on the `Search` instance:
+
+```python
+s = s[10:20]
+# {"from": 10, "size": 10}
+
+s = s[:20]
+# {"size": 20}
+
+s = s[10:]
+# {"from": 10}
+
+s = s[10:20][2:]
+# {"from": 12, "size": 8}
+```
+
+If you want to access all the documents matched by your query you can use the `scan` method which uses the scan/scroll elasticsearch API:
+
+```python
+for hit in s.scan():
+    print(hit.title)
+```
+
+Note that in this case the results won’t be sorted.
+
+
+#### Highlighting [_highlighting]
+
+To set common attributes for highlighting use the `highlight_options` method:
+
+```python
+s = s.highlight_options(order='score')
+```
+
+Enabling highlighting for individual fields is done using the `highlight` method:
+
+```python
+s = s.highlight('title')
+# or, including parameters:
+s = s.highlight('title', fragment_size=50)
+```
+
+The fragments in the response will then be available on each `Result` object as `.meta.highlight.FIELD` which will contain the list of fragments:
+
+```python
+response = s.execute()
+for hit in response:
+    for fragment in hit.meta.highlight.title:
+        print(fragment)
+```
+
+
+#### Suggestions [_suggestions]
+
+To specify a suggest request on your `Search` object use the `suggest` method:
+
+```python
+# check for correct spelling
+s = s.suggest('my_suggestion', 'pyhton', term={'field': 'title'})
+```
+
+The first argument is the name of the suggestion (the name under which it will be returned), the second is the actual text you wish the suggester to work on. The keyword arguments will be added to the suggest’s JSON as-is, which means that it should be one of `term`, `phrase` or `completion` to indicate which type of suggester should be used.
+
+
+#### Collapsing [_collapsing]
+
+To collapse search results use the `collapse` method on your `Search` object:
+
+```python
+s = Search().query(Match("message", "GET /search"))
+# collapse results by user_id
+s = s.collapse("user_id")
+```
+
+The top hits will only include one result per `user_id`. You can also expand each collapsed top hit with the `inner_hits` parameter, `max_concurrent_group_searches` being the number of concurrent requests allowed to retrieve the inner hits per group:
+
+```python
+inner_hits = {"name": "recent_search", "size": 5, "sort": [{"@timestamp": "desc"}]}
+s = s.collapse("user_id", inner_hits=inner_hits, max_concurrent_group_searches=4)
+```
+
+
+#### More Like This Query [_more_like_this_query]
+
+To use Elasticsearch’s `more_like_this` functionality, you can use the MoreLikeThis query type.
+
+A simple example is shown below:
+
+```python
+from elasticsearch.dsl.query import MoreLikeThis
+from elasticsearch.dsl import Search
+
+my_text = 'I want to find something similar'
+
+s = Search()
+# We're going to match based only on two fields, in this case text and title
+s = s.query(MoreLikeThis(like=my_text, fields=['text', 'title']))
+# You can also exclude fields from the result to make the response quicker in the normal way
+s = s.source(exclude=["text"])
+response = s.execute()
+
+for hit in response:
+    print(hit.title)
+```
+
+
+#### Extra properties and parameters [_extra_properties_and_parameters]
+
+To set extra properties of the search request, use the `.extra()` method. This can be used to define keys in the body that cannot be defined via a specific API method like `explain` or `search_after`:
+
+```python
+s = s.extra(explain=True)
+```
+
+To set query parameters, use the `.params()` method:
+
+```python
+s = s.params(routing="42")
+```
+
+If you need to limit the fields returned by Elasticsearch, use the `source()` method:
+
+```python
+# only return the selected fields
+s = s.source(['title', 'body'])
+# don't return any fields, just the metadata
+s = s.source(False)
+# explicitly include/exclude fields
+s = s.source(includes=["title"], excludes=["user.*"])
+# reset the field selection
+s = s.source(None)
+```
+
+
+#### Serialization and Deserialization [_serialization_and_deserialization]
+
+The search object can be serialized into a dictionary by using the `.to_dict()` method.
+
+You can also create a `Search` object from a `dict` using the `from_dict` class method. This will create a new `Search` object and populate it using the data from the dict:
+
+```python
+s = Search.from_dict({"query": {"match": {"title": "python"}}})
+```
+
+If you wish to modify an existing `Search` object, overriding its properties, instead use the `update_from_dict` method, which alters an instance **in-place**:
+
+```python
+s = Search(index='i')
+s.update_from_dict({"query": {"match": {"title": "python"}}, "size": 42})
+```
+
+
+
+### Response [_response]
+
+You can execute your search by calling the `.execute()` method that will return a `Response` object. The `Response` object allows you access to any key from the response dictionary via attribute access. It also provides some convenient helpers:
+
+```python
+response = s.execute()
+
+print(response.success())
+# True
+
+print(response.took)
+# 12
+
+print(response.hits.total.relation)
+# eq
+print(response.hits.total.value)
+# 142
+
+print(response.suggest.my_suggestions)
+```
+
+If you want to inspect the contents of the `response` object, just use its `to_dict` method to get access to the raw data for pretty printing.
+
+#### Hits [_hits]
+
+To access the hits returned by the search, use the `hits` property or just iterate over the `Response` object:
+
+```python
+response = s.execute()
+print(f"Total {response.hits.total} hits found.")
+for h in response:
+    print(h.title, h.body)
+```
+
+::::{note}
+If you are only seeing partial results (e.g. 10000 or even 10 results), consider using the option `s.extra(track_total_hits=True)` to get a full hit count.
+
+::::
+
+
+
+#### Result [_result]
+
+Individual hits are wrapped in a convenience class that allows attribute access to the keys in the returned dictionary. All the metadata for the result is accessible via `meta` (without the leading `_`):
+
+```python
+response = s.execute()
+h = response.hits[0]
+print(f"/{h.meta.index}/{h.meta.doc_type}/{h.meta.id} returned with score {h.meta.score}")
+```
+
+::::{note}
+If your document has a field called `meta` you have to access it using the get item syntax: `hit['meta']`.
+
+::::
+
+
+
+#### Aggregations [_aggregations_2]
+
+Aggregations are available through the `aggregations` property:
+
+```python
+for tag in response.aggregations.per_tag.buckets:
+    print(tag.key, tag.max_lines.value)
+```
+
+
+
+### `MultiSearch` [_multisearch]
+
+If you need to execute multiple searches at the same time you can use the `MultiSearch` class which will use the `_msearch` API:
+
+```python
+from elasticsearch.dsl import MultiSearch, Search
+from elasticsearch.dsl.query import Term
+
+ms = MultiSearch(index='blogs')
+
+ms = ms.add(Search().filter(Term("tags", "python")))
+ms = ms.add(Search().filter(Term("tags", 'elasticsearch')))
+
+responses = ms.execute()
+
+for response in responses:
+    print("Results for query %r." % response._search.query)
+    for hit in response:
+        print(hit.title)
+```
+
+
+### `EmptySearch` [_emptysearch]
+
+The `EmptySearch` class can be used as a fully compatible version of `Search` that will return no results, regardless of any queries configured.
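+
+A minimal sketch, assuming you want a drop-in placeholder that never returns hits:
+
+```python
+from elasticsearch.dsl import EmptySearch
+
+s = EmptySearch(index="blogs")
+response = s.execute()  # no hits are returned, regardless of any queries added
+print(s.count())        # 0
+```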
+
+
+
+## Persistence [_persistence_2]
+
+You can use the DSL module to define your mappings and a basic persistent layer for your application.
+
+For more comprehensive examples have a look at the [DSL examples](https://github.com/elastic/elasticsearch-py/tree/main/examples/dsl) directory in the repository.
+
+### Document [doc_type]
+
+If you want to create a model-like wrapper around your documents, use the `Document` class (or the equivalent `AsyncDocument` for asynchronous applications). It can also be used to create all the necessary mappings and settings in Elasticsearch (see [Document life cycle](#life-cycle) below for details).
+
+```python
+from datetime import datetime
+from elasticsearch.dsl import Document, Date, Nested, Boolean, \
+    analyzer, InnerDoc, Completion, Keyword, Text
+
+html_strip = analyzer('html_strip',
+    tokenizer="standard",
+    filter=["standard", "lowercase", "stop", "snowball"],
+    char_filter=["html_strip"]
+)
+
+class Comment(InnerDoc):
+    author = Text(fields={'raw': Keyword()})
+    content = Text(analyzer='snowball')
+    created_at = Date()
+
+    def age(self):
+        return datetime.now() - self.created_at
+
+class Post(Document):
+    title = Text()
+    title_suggest = Completion()
+    created_at = Date()
+    published = Boolean()
+    category = Text(
+        analyzer=html_strip,
+        fields={'raw': Keyword()}
+    )
+
+    comments = Nested(Comment)
+
+    class Index:
+        name = 'blog'
+
+    def add_comment(self, author, content):
+        self.comments.append(
+          Comment(author=author, content=content, created_at=datetime.now()))
+
+    def save(self, **kwargs):
+        self.created_at = datetime.now()
+        return super().save(**kwargs)
+```
+
+#### Data types [_data_types]
+
+`Document` instances use native Python types such as `str` and `datetime` for their attributes. In the case of `Object` or `Nested` fields, an instance of the `InnerDoc` subclass is used, as in the `add_comment` method in the example above, where we create an instance of the `Comment` class.
+
+There are some specific types that were created to make working with some field types easier, for example the `Range` object used in any of the [range fields](elasticsearch://reference/elasticsearch/mapping-reference/range.md):
+
+```python
+from elasticsearch.dsl import Document, DateRange, Keyword, Range
+
+class RoomBooking(Document):
+    room = Keyword()
+    dates = DateRange()
+
+rb = RoomBooking(
+  room='Conference Room II',
+  dates=Range(
+    gte=datetime(2018, 11, 17, 9, 0, 0),
+    lt=datetime(2018, 11, 17, 10, 0, 0)
+  )
+)
+
+# Range supports the in operator correctly:
+datetime(2018, 11, 17, 9, 30, 0) in rb.dates # True
+
+# you can also get the limits and whether they are inclusive or exclusive:
+rb.dates.lower # datetime(2018, 11, 17, 9, 0, 0), True
+rb.dates.upper # datetime(2018, 11, 17, 10, 0, 0), False
+
+# empty range is unbounded
+Range().lower # None, False
+```
+
+
+#### Python Type Hints [_python_type_hints]
+
+Document fields can be defined using standard Python type hints if desired. Here are some simple examples:
+
+```python
+from typing import Optional
+
+class Post(Document):
+    title: str                      # same as title = Text(required=True)
+    created_at: Optional[datetime]  # same as created_at = Date(required=False)
+    published: bool                 # same as published = Boolean(required=True)
+```
+
+::::{note}
+When using `Field` subclasses such as `Text`, `Date` and `Boolean` to define attributes, these classes must be given on the right-hand side:
+
+```python
+class Post(Document):
+    title = Text()  # correct
+    subtitle: Text  # incorrect
+```
+
+Using a `Field` subclass as a Python type hint will result in errors.
+::::
+
+Python types are mapped to their corresponding `Field` types according to the following table:
+
+| Python type | DSL field |
+| --- | --- |
+| `str` | `Text(required=True)` |
+| `bool` | `Boolean(required=True)` |
+| `int` | `Integer(required=True)` |
+| `float` | `Float(required=True)` |
+| `bytes` | `Binary(required=True)` |
+| `datetime` | `Date(required=True)` |
+| `date` | `Date(format="yyyy-MM-dd", required=True)` |
+
+To type a field as optional, the standard `Optional` modifier from the Python `typing` package can be used. When using Python 3.10 or newer, "pipe" syntax can also be used, by adding `| None` to a type. The `List` modifier can be added to a field to convert it to an array, similar to using the `multi=True` argument on the `Field` object.
+
+```python
+from typing import Optional, List
+
+class MyDoc(Document):
+    pub_date: Optional[datetime]  # same as pub_date = Date()
+    middle_name: str | None       # same as middle_name = Text()
+    authors: List[str]            # same as authors = Text(multi=True, required=True)
+    comments: Optional[List[str]] # same as comments = Text(multi=True)
+```
+
+A field can also be given a type hint of an `InnerDoc` subclass, in which case it becomes an `Object` field of that class. When the `InnerDoc` subclass is wrapped with `List`, a `Nested` field is created instead.
+
+```python
+from typing import List
+
+class Address(InnerDoc):
+    ...
+
+class Comment(InnerDoc):
+    ...
+
+class Post(Document):
+    address: Address         # same as address = Object(Address, required=True)
+    comments: List[Comment]  # same as comments = Nested(Comment, required=True)
+```
+
+Unfortunately it is impossible to have Python type hints that uniquely identify every possible Elasticsearch `Field` type. To choose a type that is different than the one that is assigned according to the table above, the desired `Field` instance can be added explicitly as a right-side assignment in the field declaration. The next example creates a field that is typed as `Optional[str]`, but is mapped to `Keyword` instead of `Text`:
+
+```python
+class MyDocument(Document):
+    category: Optional[str] = Keyword()
+```
+
+This form can also be used when additional options need to be given to initialize the field, such as when using custom analyzer settings:
+
+```python
+class Comment(InnerDoc):
+    content: str = Text(analyzer='snowball')
+```
+
+When using type hints as above, subclasses of `Document` and `InnerDoc` inherit some of the behaviors associated with Python dataclasses, as defined by [PEP 681](https://peps.python.org/pep-0681/) and the [dataclass_transform decorator](https://typing.readthedocs.io/en/latest/spec/dataclasses.html#dataclass-transform). To add per-field dataclass options such as `default` or `default_factory`, the `mapped_field()` wrapper can be used on the right side of a typed field declaration:
+
+```python
+class MyDocument(Document):
+    title: str = mapped_field(default="no title")
+    created_at: datetime = mapped_field(default_factory=datetime.now)
+    published: bool = mapped_field(default=False)
+    category: str = mapped_field(Keyword(), default="general")
+```
+
+The `mapped_field()` wrapper function can optionally be given an explicit field type instance as a first positional argument, as the `category` field does in the example above to be defined as `Keyword` instead of the `Text` default.
+
+Static type checkers such as [mypy](https://mypy-lang.org/) and [pyright](https://github.com/microsoft/pyright) can use the type hints and the dataclass-specific options added to the `mapped_field()` function to improve type inference and provide better real-time code completion and suggestions in IDEs.
+
+One situation in which type checkers can’t infer the correct type is when using fields as class attributes. Consider the following example:
+
+```python
+class MyDocument(Document):
+    title: str
+
+doc = MyDocument()
+# doc.title is typed as "str" (correct)
+# MyDocument.title is also typed as "str" (incorrect)
+```
+
+To help type checkers correctly identify class attributes as such, the `M` generic must be used as a wrapper to the type hint, as shown in the next examples:
+
+```python
+from elasticsearch.dsl import M
+
+class MyDocument(Document):
+    title: M[str]
+    created_at: M[datetime] = mapped_field(default_factory=datetime.now)
+
+doc = MyDocument()
+# doc.title is typed as "str"
+# doc.created_at is typed as "datetime"
+# MyDocument.title is typed as "InstrumentedField"
+# MyDocument.created_at is typed as "InstrumentedField"
+```
+
+Note that the `M` type hint does not provide any runtime behavior and its use is not required, but it can be useful to eliminate spurious type errors in IDEs or type checking builds.
+
+The `InstrumentedField` objects returned when fields are accessed as class attributes are proxies for the field instances that can be used anywhere a field needs to be referenced, such as when specifying sort options in a `Search` object:
+
+```python
+# sort by creation date descending, and title ascending
+s = MyDocument.search().sort(-MyDocument.created_at, MyDocument.title)
+```
+
+When specifying sorting order, the `+` and `-` unary operators can be used on the class field attributes to indicate ascending and descending order.
+
+Finally, it is also possible to define class attributes and request that they are ignored when building the Elasticsearch mapping. One way is to type attributes with the `ClassVar` annotation. Alternatively, the `mapped_field()` wrapper function accepts an `exclude` argument that can be set to `True`:
+
+```python
+from typing import ClassVar
+
+class MyDoc(Document):
+    title: M[str]
+    created_at: M[datetime] = mapped_field(default_factory=datetime.now)
+    my_var: ClassVar[str]  # regular class variable, ignored by Elasticsearch
+    another_custom_var: int = mapped_field(exclude=True)  # also ignored by Elasticsearch
+```
+
+#### Note on dates [_note_on_dates]
+
+The DSL module will always respect the timezone information (or lack thereof) on the `datetime` objects passed in or stored in Elasticsearch. Elasticsearch itself interprets all datetimes with no timezone information as `UTC`. If you wish to reflect this in your python code, you can specify `default_timezone` when instantiating a `Date` field:
+
+```python
+class Post(Document):
+    created_at = Date(default_timezone='UTC')
+```
+
+In that case any `datetime` object passed in (or parsed from elasticsearch) will be treated as if it were in `UTC` timezone.
+
+
+#### Document life cycle [life-cycle]
+
+Before you first use the `Post` document type, you need to create the mappings in Elasticsearch. For that you can either use the `index` object or create the mappings directly by calling the `init` class method:
+
+```python
+# create the mappings in Elasticsearch
+Post.init()
+```
+
+This code will typically be run in the setup for your application during a code deploy, similar to running database migrations.
+
+To create a new `Post` document, just instantiate the class and pass in any fields you wish to set. You can then use standard attribute setting to change or add more fields. Note that you are not limited to the fields defined explicitly:
+
+```python
+# instantiate the document
+first = Post(title='My First Blog Post, yay!', published=True)
+# assign some field values, can be values or lists of values
+first.category = ['everything', 'nothing']
+# every document has an id in meta
+first.meta.id = 47
+
+
+# save the document into the cluster
+first.save()
+```
+
+All the metadata fields (`id`, `routing`, `index`, etc.) can be accessed (and set) via a `meta` attribute or directly using the underscored variant:
+
+```python
+post = Post(meta={'id': 42})
+
+# prints 42
+print(post.meta.id)
+
+# override default index
+post.meta.index = 'my-blog'
+```
+
+::::{note}
+Having all metadata accessible through `meta` means that this name is reserved and you shouldn’t have a field called `meta` on your document. If you, however, need it you can still access the data using the get item (as opposed to attribute) syntax: `post['meta']`.
+
+::::
+
+
+To retrieve an existing document use the `get` class method:
+
+```python
+# retrieve the document
+first = Post.get(id=42)
+# now we can call methods, change fields, ...
+first.add_comment('me', 'This is nice!')
+# and save the changes into the cluster again
+first.save()
+```
+
+The [Update API](https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-document) can also be used via the `update` method. By default, any keyword arguments beyond the parameters of the API will be considered fields with new values. Those fields will be updated on the local copy of the document and then sent over as a partial document to be updated:
+
+```python
+# retrieve the document
+first = Post.get(id=42)
+# you can update just individual fields which will call the update API
+# and also update the document in place
+first.update(published=True, published_by='me')
+```
+
+In case you wish to use a `painless` script to perform the update you can pass in the script string as `script` or the `id` of a [stored script](docs-content://explore-analyze/scripting/modules-scripting-using.md) via `script_id`. All additional keyword arguments to the `update` method will then be passed in as parameters of the script. The document will not be updated in place.
+
+```python
+# retrieve the document
+first = Post.get(id=42)
+# we execute a script in elasticsearch with additional kwargs being passed
+# as params into the script
+first.update(script='ctx._source.category.add(params.new_category)',
+             new_category='testing')
+```
+
+If the document is not found in elasticsearch an exception (`elasticsearch.NotFoundError`) will be raised. If you wish to return `None` instead just pass in `ignore=404` to suppress the exception:
+
+```python
+p = Post.get(id='not-in-es', ignore=404)
+p is None
+```
+
+When you wish to retrieve multiple documents at the same time by their `id` you can use the `mget` method:
+
+```python
+posts = Post.mget([42, 47, 256])
+```
+
+`mget` will, by default, raise a `NotFoundError` if any of the documents was not found, and a `RequestError` if any of the documents resulted in an error. You can control this behavior with the following parameters (see the sketch after the list):
+
+* `raise_on_error`: If `True` (default) then any error will cause an exception to be raised. Otherwise all documents containing errors will be treated as missing.
+* `missing`: Can have three possible values: `'none'` (default), `'raise'` and `'skip'`. A missing or errored document will either be replaced with `None`, cause an exception to be raised, or be skipped in the output list entirely.
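+
+A minimal sketch combining both options (the document ids are illustrative):
+
+```python
+# treat errored documents as missing and skip any missing documents in the output
+posts = Post.mget([42, 47, 256], raise_on_error=False, missing='skip')
+```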
+
+The index associated with the `Document` is accessible via the `_index` class property which gives you access to the `index` class.
+
+The `_index` attribute is also home to the `load_mappings` method which will update the mapping on the `Index` from elasticsearch. This is very useful if you use dynamic mappings and want the class to be aware of those fields (for example if you wish the `Date` fields to be properly (de)serialized):
+
+```python
+Post._index.load_mappings()
+```
+
+To delete a document just call its `delete` method:
+
+```python
+first = Post.get(id=42)
+first.delete()
+```
+
+#### Integration with Pydantic models
+
+::::{warning}
+This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.
+::::
+
+::::{note}
+This feature is available in the Python Elasticsearch client starting with release 9.2.0.
+::::
+
+Applications that define their data models using [Pydantic](https://docs.pydantic.dev/latest/) can combine these
+models with Elasticsearch DSL annotations. To take advantage of this option, Pydantic's `BaseModel` base class
+needs to be replaced with `BaseESModel` (or `AsyncBaseESModel` for asynchronous applications), and then the model
+can include type annotations for Pydantic and Elasticsearch both, as demonstrated in the following example:
+
+```python
+from typing import Annotated
+from pydantic import Field
+from elasticsearch import dsl
+from elasticsearch.dsl.pydantic import BaseESModel
+
+class Quote(BaseESModel):
+    quote: str
+    author: Annotated[str, dsl.Keyword()]
+    tags: Annotated[list[str], dsl.Keyword(normalizer="lowercase")]
+    embedding: Annotated[list[float], dsl.DenseVector()] = Field(init=False, default=[])
+
+    class Index:
+        name = "quotes"
+```
+
+In this example, the `quote` attribute is annotated with a `str` type hint. Both Pydantic and Elasticsearch use this 
+annotation.
+
+The `author` and `tags` attributes have a Python type hint and an Elasticsearch annotation, both wrapped with
+Python's `typing.Annotated`. When using the `BaseESModel` class, the typing information intended for Elasticsearch needs
+to be defined inside `Annotated`.
+
+The `embedding` attribute includes a base Python type and an Elasticsearch annotation in the same format as the
+other fields, but it adds Pydantic's `Field` definition as a right-hand side assignment.
+
+Finally, any other items that need to be defined for the Elasticsearch document class, such as `class Index` and
+`class Meta` entries (discussed later), can be added as well.
+
+The next example demonstrates how to define `Object` and `Nested` fields:
+
+```python
+from typing import Annotated
+from pydantic import BaseModel, Field
+from elasticsearch import dsl
+from elasticsearch.dsl.pydantic import BaseESModel
+
+class Phone(BaseModel):
+    type: Annotated[str, dsl.Keyword()] = Field(default="Home")
+    number: str
+
+class Person(BaseESModel):
+    name: str
+    main_phone: Phone          # same as Object(Phone)
+    other_phones: list[Phone]  # same as Nested(Phone)
+
+    class Index:
+        name = "people"
+```
+
+Note that inner classes do not need to be defined with a custom base class; these should be standard Pydantic model 
+classes. The attributes defined in these classes can include Elasticsearch annotations, as long as they are given
+in an `Annotated` type hint.
+
+All model classes that are created as described in this section function like normal Pydantic models and can be used
+anywhere standard Pydantic models are used, but they have some added attributes:
+
+- `_doc`: a class attribute that is a dynamically generated `Document` class to use with the Elasticsearch index.
+- `meta`: an attribute added to all models that includes Elasticsearch document metadata items such as `id`, `score`, etc.
+- `to_doc()`: a method that converts the Pydantic model to an Elasticsearch document.
+- `from_doc()`: a class method that accepts an Elasticsearch document as an argument and returns an equivalent Pydantic model.
+
+These are demonstrated in the examples below:
+
+```python
+from elasticsearch.dsl.query import Match
+
+# create a Pydantic model
+quote = Quote(
+    quote="An unexamined life is not worth living.",
+    author="Socrates",
+    tags=["philosophy"]
+)
+
+# save the model to the Elasticsearch index
+quote.to_doc().save()
+
+# get a document from the Elasticsearch index as a Pydantic model
+quote = Quote.from_doc(Quote._doc.get(id=42))
+
+# run a search and print the Pydantic models
+s = Quote._doc.search().query(Match(Quote._doc.quote, "life"))
+for doc in s:
+    quote = Quote.from_doc(doc)
+    print(quote.meta.id, quote.meta.score, quote.quote)
+```
+
+#### Analysis [_analysis]
+
+To specify `analyzer` values for `Text` fields you can just use the name of the analyzer (as a string) and either rely on the analyzer being defined (like built-in analyzers) or define the analyzer yourself manually.
+
+Alternatively, you can create your own analyzer and have the persistence layer handle its creation, from our example earlier:
+
+```python
+from elasticsearch.dsl import analyzer, tokenizer
+
+my_analyzer = analyzer('my_analyzer',
+    tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
+    filter=['lowercase']
+)
+```
+
+Each analysis object needs to have a name (`my_analyzer` and `trigram` in our example), and tokenizers, token filters and char filters also need to specify a type (`nGram` in our example).
+
+Once you have an instance of a custom `analyzer` you can also call the [analyze API](https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-indices) on it by using the `simulate` method:
+
+```python
+response = my_analyzer.simulate('Hello World!')
+
+# ['hel', 'ell', 'llo', 'lo ', 'o w', ' wo', 'wor', 'orl', 'rld', 'ld!']
+tokens = [t.token for t in response.tokens]
+```
+
+::::{note}
+When creating a mapping which relies on a custom analyzer the index must either not exist or be closed. To create multiple `Document`-defined mappings you can use the `index` object.
+
+::::
+
+
+
+#### Search [_search_2]
+
+To search for this document type, use the `search` class method:
+
+```python
+# by calling .search we get back a standard Search object
+s = Post.search()
+# the search is already limited to the index and doc_type of our document
+s = s.filter('term', published=True).query('match', title='first')
+
+
+results = s.execute()
+
+# when you execute the search the results are wrapped in your document class (Post)
+for post in results:
+    print(post.meta.score, post.title)
+```
+
+Alternatively you can just take a `Search` object and restrict it to return our document type, wrapped in the correct class:
+
+```python
+s = Search()
+s = s.doc_type(Post)
+```
+
+You can also combine document classes with standard doc types (just strings), which will be treated as before. You can also pass in multiple `Document` subclasses and each document in the response will be wrapped in its class.
+
+If you want to run suggestions, just use the `suggest` method on the `Search` object:
+
+```python
+s = Post.search()
+s = s.suggest('title_suggestions', 'pyth', completion={'field': 'title_suggest'})
+
+response = s.execute()
+
+for result in response.suggest.title_suggestions:
+    print('Suggestions for %s:' % result.text)
+    for option in result.options:
+        print('  %s (%r)' % (option.text, option.payload))
+```
+
+
+#### `class Meta` options [_class_meta_options]
+
+In the `Meta` class inside your document definition you can define various metadata for your document:
+
+* `mapping`: optional instance of `Mapping` class to use as base for the mappings created from the fields on the document class itself.
+
+Any attributes on the `Meta` class that are instances of `MetaField` will be used to control the mapping of the meta fields (`_all`, `dynamic` etc). Just name the parameter (without the leading underscore) as the field you wish to map and pass any parameters to the `MetaField` class:
+
+```python
+class Post(Document):
+    title = Text()
+
+    class Meta:
+        all = MetaField(enabled=False)
+        dynamic = MetaField('strict')
+```
+
+
+#### `class Index` options [_class_index_options]
+
+This section of the `Document` definition can contain any information about the index: its name, settings and other attributes (see the sketch after this list):
+
+* `name`: name of the index to use. If it contains a wildcard (`*`) then it cannot be used for any write operations and an `index` kwarg will have to be passed explicitly when calling methods like `.save()`.
+* `using`: default connection alias to use, defaults to `'default'`
+* `settings`: dictionary containing any settings for the `Index` object like `number_of_shards`.
+* `analyzers`: additional list of analyzers that should be defined on an index (see `analysis` for details).
+* `aliases`: dictionary with any aliases definitions
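+
+A minimal sketch combining several of these options, reusing the `html_strip` analyzer defined earlier (the values are illustrative):
+
+```python
+class Post(Document):
+    title = Text()
+
+    class Index:
+        name = 'blog'
+        using = 'default'
+        settings = {'number_of_shards': 2, 'number_of_replicas': 0}
+        aliases = {'old_blogs': {}}
+        analyzers = [html_strip]
+```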
+
+
+#### Document Inheritance [_document_inheritance]
+
+You can use standard Python inheritance to extend models. This can be useful in a few scenarios, for example when you want a `BaseDocument` defining some common fields that several different `Document` classes should share:
+
+```python
+class User(InnerDoc):
+    username: str = mapped_field(Text(fields={'keyword': Keyword()}))
+    email: str
+
+class BaseDocument(Document):
+    created_by: User
+    created_date: datetime
+    last_updated: datetime
+
+    def save(self, **kwargs):
+        if not self.created_date:
+            self.created_date = datetime.now()
+        self.last_updated = datetime.now()
+        return super().save(**kwargs)
+
+class BlogPost(BaseDocument):
+    class Index:
+        name = 'blog'
+```
+
+Another use case would be using the [join type](elasticsearch://reference/elasticsearch/mapping-reference/parent-join.md) to have multiple different entities in a single index. You can see an [example](https://github.com/elastic/elasticsearch-py/blob/master/examples/dsl/parent_child.py) of this approach. Note that in this case, if the subclasses don’t define their own Index classes, the mappings are merged and shared between all the subclasses.
+
+
+
+### Index [_index]
+
+In a typical scenario, using `class Index` on a `Document` class is sufficient to perform any action. In a few cases though it can be useful to manipulate an `Index` object directly.
+
+`Index` is a class responsible for holding all the metadata related to an index in elasticsearch - mappings and settings. It is most useful when defining your mappings since it allows for easy creation of multiple mappings at the same time. This is especially useful when setting up your elasticsearch objects in a migration:
+
+```python
+from elasticsearch.dsl import Index, Document, Text, analyzer
+
+blogs = Index('blogs')
+
+# define custom settings
+blogs.settings(
+    number_of_shards=1,
+    number_of_replicas=0
+)
+
+# define aliases
+blogs.aliases(
+    old_blogs={}
+)
+
+# register a document with the index
+blogs.document(Post)
+
+# can also be used as class decorator when defining the Document
+@blogs.document
+class Post(Document):
+    title: str
+
+# You can attach custom analyzers to the index
+
+html_strip = analyzer('html_strip',
+    tokenizer="standard",
+    filter=["standard", "lowercase", "stop", "snowball"],
+    char_filter=["html_strip"]
+)
+
+blogs.analyzer(html_strip)
+
+# delete the index, ignore if it doesn't exist
+blogs.delete(ignore=404)
+
+# create the index in elasticsearch
+blogs.create()
+```
+
+You can also set up a template for your indices and use the `clone` method to create specific copies:
+
+```python
+blogs = Index('blogs', using='production')
+blogs.settings(number_of_shards=2)
+blogs.document(Post)
+
+# create a copy of the index with different name
+company_blogs = blogs.clone('company-blogs')
+
+# create a different copy on different cluster
+dev_blogs = blogs.clone('blogs', using='dev')
+# and change its settings
+dev_blogs.settings(number_of_shards=1)
+```
+
+#### IndexTemplate [index-template]
+
+The DSL module also exposes an option to manage [index templates](docs-content://manage-data/data-store/templates.md) in elasticsearch using the `ComposableIndexTemplate` and `IndexTemplate` classes, which have very similar API to `Index`.
+
+::::{note}
+Composable index templates should always be preferred over the legacy index templates, since the latter are deprecated.
+
+::::
+
+
+Once an index template is saved in Elasticsearch, its contents will be automatically applied to new indices that match the template pattern (any index starting with `logs-` in our example), even if the index is created automatically upon indexing a document into that index. Existing indices are completely unaffected by templates.
+
+A potential workflow for a set of time-based indices governed by a single template:
+
+```python
+from datetime import datetime
+
+from elasticsearch.dsl import Document, Date, Text
+
+
+class Log(Document):
+    content: str
+    timestamp: datetime
+
+    class Index:
+        name = "logs-*"
+
+    def save(self, **kwargs):
+        # assign now if no timestamp given
+        if not self.timestamp:
+            self.timestamp = datetime.now()
+
+        # override the index to go to the proper timeslot
+        kwargs['index'] = self.timestamp.strftime('logs-%Y%m%d')
+        return super().save(**kwargs)
+
+# once, as part of application setup, during deploy/migrations:
+logs = Log._index.as_composable_template('logs', priority=100)
+logs.save()
+
+# to perform search across all logs:
+search = Log.search()
+```
+
+
+
+
+## Faceted Search [faceted_search]
+
+The library comes with a simple abstraction aimed at helping you develop faceted navigation for your data.
+
+### Configuration [_configuration_2]
+
+You can provide several configuration options (as class attributes) when declaring a `FacetedSearch` subclass:
+
+* `index`: the name of the index (as string) to search through, defaults to `'_all'`.
+* `doc_types`: list of `Document` subclasses or strings to be used, defaults to `['_all']`.
+* `fields`: list of fields on the document type to search through. The list will be passed to the `MultiMatch` query, so it can contain boost values (`'title^5'`); defaults to `['*']`.
+* `facets`: dictionary of facets to display/filter on. The key is the name displayed and values should be instances of any `Facet` subclass, for example: `{'tags': TermsFacet(field='tags')}`
+
+#### Facets [_facets]
+
+There are several different facets available:
+
+* `TermsFacet`: provides an option to split documents into groups based on a value of a field, for example `TermsFacet(field='category')`
+* `DateHistogramFacet`: split documents into time intervals, example: `DateHistogramFacet(field="published_date", calendar_interval="day")`
+* `HistogramFacet`: similar to `DateHistogramFacet` but for numerical values: `HistogramFacet(field="rating", interval=2)`
+* `RangeFacet`: allows you to define your own ranges for numerical fields: `RangeFacet(field="comment_count", ranges=[("few", (None, 2)), ("lots", (2, None))])`
+* `NestedFacet`: is just a simple facet that wraps another to provide access to nested documents: `NestedFacet('variants', TermsFacet(field='variants.color'))`
+
+By default, facet results will only calculate document count. If you wish for a different metric, you can pass in any single value metric aggregation as the `metric` kwarg (`TermsFacet(field='tags', metric=A('max', field='timestamp'))`). When specifying `metric` the results will be, by default, sorted in descending order by that metric. To change it to ascending specify `metric_sort="asc"`, and to just sort by document count use `metric_sort=False`.
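+
+A minimal sketch of a facet with a custom metric (the field names are illustrative):
+
+```python
+from elasticsearch.dsl import A, TermsFacet
+
+facets = {
+    # report the most recent timestamp per tag and sort the buckets ascending by it
+    'tags': TermsFacet(field='tags', metric=A('max', field='timestamp'), metric_sort='asc'),
+}
+```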
+
+
+#### Advanced [_advanced]
+
+If you require any custom behavior or modifications simply override one or more of the methods responsible for the class' functions:
+
+* `search(self)`: is responsible for constructing the `Search` object used. Override this if you want to customize the search object (for example by adding a global filter for published articles only).
+* `query(self, search)`: adds the query part of the search (if a search input was specified), by default using a `MultiMatch` query. Override this if you want to modify the query type used.
+* `highlight(self, search)`: defines the highlighting on the `Search` object and returns a new one. Default behavior is to highlight on all fields specified for search.
+
+
+
+### Usage [_usage]
+
+The custom subclass can be instantiated empty to provide an empty search (matching everything) or with `query`, `filters` and `sort` (see the sketch after this list).
+
+* `query`: is used to pass in the text of the query to be performed. If `None` is passed in (default) a `MatchAll` query will be used. For example `'python web'`
+* `filters`: is a dictionary containing all the facet filters that you wish to apply. Use the name of the facet (from `.facets` attribute) as the key and one of the possible values as value. For example `{'tags': 'python'}`.
+* `sort`: is a tuple or list of fields on which the results should be sorted. The format of the individual fields is the same as for `Search.sort()`.
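+
+A minimal sketch of a filtered and sorted instantiation, assuming a `BlogSearch` subclass like the one shown in the example further below:
+
+```python
+bs = BlogSearch('python web', filters={'tags': 'python'}, sort=('-published_from',))
+response = bs.execute()
+```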
+
+#### Response [_response_2]
+
+The response returned from the `FacetedSearch` object (by calling `.execute()`) is a subclass of the standard `Response` class that adds a property called `facets`. It contains a dictionary with lists of buckets, each represented by a tuple of the key, the document count, and a flag indicating whether this value has been filtered on.
+
+
+
+### Example [_example]
+
+```python
+from datetime import date
+
+from elasticsearch.dsl import FacetedSearch, TermsFacet, DateHistogramFacet
+
+class BlogSearch(FacetedSearch):
+    doc_types = [Article, ]
+    # fields that should be searched
+    fields = ['tags', 'title', 'body']
+
+    facets = {
+        # use bucket aggregations to define facets
+        'tags': TermsFacet(field='tags'),
+        'publishing_frequency': DateHistogramFacet(field='published_from', interval='month')
+    }
+
+    def search(self):
+        # override methods to add custom pieces
+        s = super().search()
+        return s.filter('range', publish_from={'lte': 'now/h'})
+
+bs = BlogSearch('python web', {'publishing_frequency': date(2015, 6)})
+response = bs.execute()
+
+# access hits and other attributes as usual
+total = response.hits.total
+print('total hits', total.relation, total.value)
+for hit in response:
+    print(hit.meta.score, hit.title)
+
+for (tag, count, selected) in response.facets.tags:
+    print(tag, ' (SELECTED):' if selected else ':', count)
+
+for (month, count, selected) in response.facets.publishing_frequency:
+    print(month.strftime('%B %Y'), ' (SELECTED):' if selected else ':', count)
+```
+
+
+
+## Update By Query [update_by_query]
+
+### The `Update By Query` object [_the_update_by_query_object]
+
+The `Update By Query` object enables the use of the [_update_by_query](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-update-by-query) endpoint to perform an update on documents that match a search query.
+
+The object is implemented as a modification of the `Search` object, containing a subset of its query methods, as well as a script method, which is used to make updates.
+
+The `Update By Query` object implements the following `Search` query types:
+
+* queries
+* filters
+* excludes
+
+For more information on queries, see the `search_dsl` chapter.
+
+Like the `Search` object, the API is designed to be chainable. This means that the `Update By Query` object is immutable: all changes to the object will result in a shallow copy being created which contains the changes. This means you can safely pass the `Update By Query` object to foreign code without fear of it modifying your objects as long as it sticks to the `Update By Query` object APIs.
+
+You can define your client in a number of ways, but the preferred method is to use a global configuration. For more information on defining a client, see the `configuration` chapter.
+
+Once your client is defined, you can instantiate a copy of the `Update By Query` object as seen below:
+
+```python
+from elasticsearch.dsl import UpdateByQuery
+
+ubq = UpdateByQuery().using(client)
+# or
+ubq = UpdateByQuery(using=client)
+```
+
+::::{note}
+All methods return a *copy* of the object, making it safe to pass to outside code.
+
+::::
+
+
+The API is chainable, allowing you to combine multiple method calls in one statement:
+
+```python
+from elasticsearch.dsl.query import Match
+
+ubq = UpdateByQuery().using(client).query(Match("title", "python"))
+```
+
+To send the request to Elasticsearch:
+
+```python
+response = ubq.execute()
+```
+
+It should be noted that there are limits to chaining when using the `script` method: calling `script` multiple times will overwrite the previous value. That is, only a single script can be sent per call; an attempt to use two scripts will result in only the second script being stored.
+
+Given the below example:
+
+```python
+ubq = UpdateByQuery() \
+    .using(client) \
+    .script(source="ctx._source.likes++") \
+    .script(source="ctx._source.likes+=2")
+```
+
+This means that the script stored by this client will be `'source': 'ctx._source.likes+=2'` and the previous call will not be stored.
+
+For debugging purposes you can serialize the `Update By Query` object to a `dict` explicitly:
+
+```python
+print(ubq.to_dict())
+```
+
+To use variables in the script, see the example below:
+
+```python
+ubq.script(
+  source="ctx._source.messages.removeIf(x -> x.somefield == params.some_var)",
+  params={
+    'some_var': 'some_string_val'
+  }
+)
+```
+
+#### Serialization and Deserialization [_serialization_and_deserialization_2]
+
+The `Update By Query` object can be serialized into a dictionary by using the `.to_dict()` method.
+
+You can also create an `Update By Query` object from a `dict` using the `from_dict` class method. This will create a new `Update By Query` object and populate it using the data from the dict:
+
+```python
+ubq = UpdateByQuery.from_dict({"query": {"match": {"title": "python"}}})
+```
+
+If you wish to modify an existing `Update By Query` object, overriding its properties, instead use the `update_from_dict` method that alters an instance **in-place**:
+
+```python
+ubq = UpdateByQuery(index='i')
+ubq.update_from_dict({"query": {"match": {"title": "python"}}, "size": 42})
+```
+
+
+#### Extra properties and parameters [_extra_properties_and_parameters_2]
+
+To set extra properties of the search request, use the `.extra()` method. This can be used to define keys in the body that cannot be defined via a specific API method like `explain`:
+
+```python
+ubq = ubq.extra(explain=True)
+```
+
+To set query parameters, use the `.params()` method:
+
+```python
+ubq = ubq.params(routing="42")
+```
+
+
+
+### Response [_response_3]
+
+You can execute your search by calling the `.execute()` method that will return a `Response` object. The `Response` object allows you access to any key from the response dictionary via attribute access. It also provides some convenient helpers:
+
+```python
+response = ubq.execute()
+
+print(response.success())
+# True
+
+print(response.took)
+# 12
+```
+
+If you want to inspect the contents of the `response` object, use its `to_dict` method to get access to the raw data for pretty printing.
+
+
+## ES|QL Queries
+
+When working with `Document` classes, you can use the ES|QL query language to retrieve documents. For this you can use the `esql_from()` and `esql_execute()` methods available to all sub-classes of `Document`.
+
+Consider the following `Employee` document definition:
+
+```python
+from elasticsearch.dsl import Document, InnerDoc, M
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+    zip_code: M[str]
+
+class Employee(Document):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = 'employees'
+```
+
+The `esql_from()` method creates a base ES|QL query for the index associated with the document class. The following example creates a base query for the `Employee` class:
+
+```python
+query = Employee.esql_from()
+```
+
+This query includes a `FROM` command with the index name, and a `KEEP` command that retrieves all the document attributes.
+
+To execute this query and receive the results, you can pass the query to the `esql_execute()` method:
+
+```python
+for emp in Employee.esql_execute(query):
+    print(f"{emp.first_name} {emp.last_name} from {emp.address.city} is {emp.height:.2f}m tall")
+```
+
+In this example, the `esql_execute()` class method runs the query and returns all the documents in the index, up to the maximum of 1000 results allowed by ES|QL. Here is a possible output from this example:
+
+```
+Kevin Macias from North Robert is 1.60m tall
+Drew Harris from Boltonshire is 1.68m tall
+Julie Williams from Maddoxshire is 1.99m tall
+Christopher Jones from Stevenbury is 1.98m tall
+Anthony Lopez from Port Sarahtown is 2.42m tall
+Tricia Stone from North Sueshire is 2.39m tall
+Katherine Ramirez from Kimberlyton is 1.83m tall
+...
+```
+
+To search for specific documents you can extend the base query with additional ES|QL commands that narrow the search criteria. The next example narrows the search to employees that are taller than 2 meters, sorted by their last name. It also limits the results to 4 people:
+
+```python
+query = (
+    Employee.esql_from()
+    .where(Employee.height > 2)
+    .sort(Employee.last_name)
+    .limit(4)
+)
+```
+
+When running this query with the same for-loop shown above, possible results would be:
+
+```
+Michael Adkins from North Stacey is 2.48m tall
+Kimberly Allen from Toddside is 2.24m tall
+Crystal Austin from East Michaelchester is 2.30m tall
+Rebecca Berger from Lake Adrianside is 2.40m tall
+```
+
+### Additional fields
+
+ES|QL provides a few ways to add new fields to a query, for example through the `EVAL` command. The following example shows a query that adds an evaluated field:
+
+```python
+from elasticsearch.esql import E, functions
+
+query = (
+    Employee.esql_from()
+    .eval(height_cm=functions.round(Employee.height * 100))
+    .where(E("height_cm") >= 200)
+    .sort(Employee.last_name)
+    .limit(10)
+)
+```
+
+In this example we are adding the height in centimeters to the query, calculated from the `height` document field, which is in meters. The `height_cm` calculated field is available to use in other query clauses, and in particular is referenced in `where()` in this example. Note how the new field is given as `E("height_cm")` in this clause. The `E()` wrapper tells the query builder that the argument is an ES|QL field name and not a string literal. This is done automatically for document fields that are given as class attributes, such as `Employee.height` in the `eval()`. The `E()` wrapper is only needed for fields that are not in the document.
+
+By default, the `esql_execute()` method returns only document instances. To receive any additional fields that are not part of the document in the query results, the `return_additional=True` argument can be passed to it, and then the results are returned as tuples with the document as first element, and a dictionary with the additional fields as second element:
+
+```python
+for emp, additional in Employee.esql_execute(query, return_additional=True):
+    print(emp.first_name, emp.last_name, additional)
+```
+
+Example output from the query given above:
+
+```
+Michael Adkins {'height_cm': 248.0}
+Kimberly Allen {'height_cm': 224.0}
+Crystal Austin {'height_cm': 230.0}
+Rebecca Berger {'height_cm': 240.0}
+Katherine Blake {'height_cm': 214.0}
+Edward Butler {'height_cm': 246.0}
+Steven Carlson {'height_cm': 242.0}
+Mark Carter {'height_cm': 240.0}
+Joseph Castillo {'height_cm': 229.0}
+Alexander Cohen {'height_cm': 245.0}
+```
+
+### Missing fields
+
+The base query returned by the `esql_from()` method includes a `KEEP` command with the complete list of fields that are part of the document. If any subsequent clauses added to the query remove fields that are part of the document, then the `esql_execute()` method will raise an exception, because it will not be able to construct complete document instances to return as results.
+
+To prevent errors, it is recommended that the `keep()` and `drop()` clauses are not used when working with `Document` instances.
+
+If a query has missing fields, it can be forced to execute without errors by passing the `ignore_missing_fields=True` argument to `esql_execute()`. When this option is used, returned documents will have any missing fields set to `None`.
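+
+A minimal sketch, assuming a `drop()` clause is used to remove a document field from the results:
+
+```python
+# "last_name" is part of the Employee document, so dropping it creates a missing field
+query = Employee.esql_from().drop(Employee.last_name)
+
+# without ignore_missing_fields=True this query would raise an exception
+for emp in Employee.esql_execute(query, ignore_missing_fields=True):
+    print(emp.first_name, emp.last_name)  # last_name is None
+```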
+
+## Using asyncio with Elasticsearch Python DSL [asyncio]
+
+The DSL module supports async/await with [asyncio](https://docs.python.org/3/library/asyncio.html). To ensure that you have all the required dependencies, install the `[async]` extra:
+
+```bash
+$ python -m pip install "elasticsearch[async]"
+```
+
+The DSL module also supports [Trio](https://trio.readthedocs.io/en/stable/) when using the Async HTTPX client. You do need to install Trio and HTTPX separately:
+
+```bash
+$ python -m pip install elasticsearch trio httpx
+```
+
+### Connections [_connections]
+
+Use the `async_connections` module to manage your asynchronous connections.
+
+```python
+from elasticsearch.dsl import async_connections
+
+async_connections.create_connection(hosts=['localhost'], timeout=20)
+```
+
+If you're using Trio, you need to explicitly request the Async HTTPX client:
+
+```python
+from elasticsearch.dsl import async_connections
+
+async_connections.create_connection(hosts=['localhost'], node_class="httpxasync")
+```
+
+All the options available in the `connections` module can be used with `async_connections`.
+
+#### How to avoid *Unclosed client session / connector* warnings on exit [_how_to_avoid_unclosed_client_session_connector_warnings_on_exit]
+
+These warnings come from the `aiohttp` package, which is used internally by the `AsyncElasticsearch` client. They appear often when the application exits and are caused by HTTP connections that are open when they are garbage collected. To avoid these warnings, make sure that you close your connections.
+
+```python
+es = async_connections.get_connection()
+await es.close()
+```
+
+### Search DSL [_search_dsl]
+
+Use the `AsyncSearch` class to perform asynchronous searches.
+
+```python
+from elasticsearch.dsl import AsyncSearch
+from elasticsearch.dsl.query import Match
+
+s = AsyncSearch().query(Match("title", "python"))
+async for hit in s:
+    print(hit.title)
+```
+
+Instead of using the `AsyncSearch` object as an asynchronous iterator, you can explicitly call the `execute()` method to get a `Response` object.
+
+```python
+s = AsyncSearch().query(Match("title", "python"))
+response = await s.execute()
+for hit in response:
+    print(hit.title)
+```
+
+An `AsyncMultiSearch` is available as well.
+
+```python
+from elasticsearch.dsl import AsyncMultiSearch, AsyncSearch
+from elasticsearch.dsl.query import Term
+
+ms = AsyncMultiSearch(index='blogs')
+
+ms = ms.add(AsyncSearch().filter(Term("tags", "python")))
+ms = ms.add(AsyncSearch().filter(Term("tags", "elasticsearch")))
+
+responses = await ms.execute()
+
+for response in responses:
+    print("Results for query %r." % response.search.query)
+    for hit in response:
+        print(hit.title)
+```
+
+
+### Asynchronous Documents, Indexes, and more [_asynchronous_documents_indexes_and_more]
+
+The `Document`, `BaseESModel`, `Index`, `IndexTemplate`, `Mapping`, `UpdateByQuery` and `FacetedSearch` classes all have asynchronous versions that use the same name with an `Async` prefix. These classes expose the same interfaces as the synchronous versions, but any methods that perform I/O are defined as coroutines.
+
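+A minimal sketch of an asynchronous document definition and its use (names are illustrative):
+
+```python
+from elasticsearch.dsl import AsyncDocument, Text
+
+class Post(AsyncDocument):
+    title = Text()
+
+    class Index:
+        name = 'blog'
+
+async def write_post():
+    await Post.init()                       # creating the mappings is a coroutine here
+    post = Post(title='Hello async world')
+    await post.save()                       # I/O methods are awaited
+```
+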
+Auxiliary classes that do not perform I/O do not have asynchronous versions. The same classes can be used in synchronous and asynchronous applications.
+
+When using a custom analyzer in an asynchronous application, use the `async_simulate()` method to invoke the Analyze API on it.
+
+Consult the `api` section for details about each specific method.
+
+
+
diff -pruN 8.17.2-2/docs/reference/dsl_migrating.md 9.2.0-1/docs/reference/dsl_migrating.md
--- 8.17.2-2/docs/reference/dsl_migrating.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/dsl_migrating.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,35 @@
+# Migrating from the `elasticsearch-dsl` package  [_migrating_from_elasticsearch_dsl_package]
+
+In the past the Elasticsearch Python DSL module was distributed as a standalone package called `elasticsearch-dsl`. This package is now deprecated, since all its functionality has been integrated into the main Python client. We recommend that all developers migrate their applications and eliminate their dependency on the `elasticsearch-dsl` package.
+
+To migrate your application, all references to `elasticsearch_dsl` as a top-level package must be changed to `elasticsearch.dsl`. In other words, the underscore from the package name should be replaced by a period.
+
+Here are a few examples:
+
+```python
+# from:
+from elasticsearch_dsl import Date, Document, InnerDoc, Text, connections
+# to:
+from elasticsearch.dsl import Date, Document, InnerDoc, Text, connections
+
+# from:
+from elasticsearch_dsl.query import MultiMatch
+# to:
+from elasticsearch.dsl.query import MultiMatch
+
+# from:
+import elasticsearch_dsl as dsl
+# to:
+from elasticsearch import dsl
+
+# from:
+import elasticsearch_dsl
+# to:
+from elasticsearch import dsl as elasticsearch_dsl
+
+# from:
+import elasticsearch_dsl
+# to:
+from elasticsearch import dsl
+# and replace all references to "elasticsearch_dsl" in the code with "dsl"
+```
diff -pruN 8.17.2-2/docs/reference/dsl_tutorials.md 9.2.0-1/docs/reference/dsl_tutorials.md
--- 8.17.2-2/docs/reference/dsl_tutorials.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/dsl_tutorials.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,250 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_tutorials.html
+---
+
+# Tutorials [_tutorials]
+
+## Search [_search]
+
+Let’s have a typical search request written directly as a `dict`:
+
+```python
+from elasticsearch import Elasticsearch
+client = Elasticsearch("https://localhost:9200")
+
+response = client.search(
+    index="my-index",
+    body={
+      "query": {
+        "bool": {
+          "must": [{"match": {"title": "python"}}],
+          "must_not": [{"match": {"description": "beta"}}],
+          "filter": [{"term": {"category": "search"}}]
+        }
+      },
+      "aggs" : {
+        "per_tag": {
+          "terms": {"field": "tags"},
+          "aggs": {
+            "max_lines": {"max": {"field": "lines"}}
+          }
+        }
+      }
+    }
+)
+
+for hit in response['hits']['hits']:
+    print(hit['_score'], hit['_source']['title'])
+
+for tag in response['aggregations']['per_tag']['buckets']:
+    print(tag['key'], tag['max_lines']['value'])
+```
+
+The problem with this approach is that it is very verbose, prone to syntax mistakes like incorrect nesting, hard to modify (e.g. adding another filter) and definitely not fun to write.
+
+Let’s rewrite the example using the DSL module:
+
+```python
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import Search, query, aggs
+
+client = Elasticsearch("https://localhost:9200")
+
+s = Search(using=client, index="my-index") \
+    .query(query.Match("title", "python"))   \
+    .filter(query.Term("category", "search")) \
+    .exclude(query.Match("description", "beta"))
+
+s.aggs.bucket('per_tag', aggs.Terms(field="tags")) \
+    .metric('max_lines', aggs.Max(field='lines'))
+
+response = s.execute()
+
+for hit in response:
+    print(hit.meta.score, hit.title)
+
+for tag in response.aggregations.per_tag.buckets:
+    print(tag.key, tag.max_lines.value)
+```
+
+As you see, the DSL module took care of:
+
+* creating appropriate `Query` objects from classes
+* composing queries into a compound `bool` query
+* putting the `term` query in a filter context of the `bool` query
+* providing convenient access to response data
+* no curly or square brackets everywhere
+
+
+## Persistence [_persistence]
+
+Let’s have a simple Python class representing an article in a blogging system:
+
+```python
+from datetime import datetime
+from elasticsearch.dsl import Document, Date, Integer, Keyword, Text, connections, mapped_field
+
+# Define a default Elasticsearch client
+connections.create_connection(hosts="https://localhost:9200")
+
+class Article(Document):
+    title: str = mapped_field(Text(analyzer='snowball', fields={'raw': Keyword()}))
+    body: str = mapped_field(Text(analyzer='snowball'))
+    tags: list[str] = mapped_field(Keyword())
+    published_from: datetime
+    lines: int
+
+    class Index:
+        name = 'blog'
+        settings = {
+          "number_of_shards": 2,
+        }
+
+    def save(self, **kwargs):
+        self.lines = len(self.body.split())
+        return super().save(**kwargs)
+
+    def is_published(self):
+        return datetime.now() > self.published_from
+
+# create the mappings in elasticsearch
+Article.init()
+
+# create and save an article
+article = Article(meta={'id': 42}, title='Hello world!', tags=['test'])
+article.body = ''' looong text '''
+article.published_from = datetime.now()
+article.save()
+
+article = Article.get(id=42)
+print(article.is_published())
+
+# Display cluster health
+print(connections.get_connection().cluster.health())
+```
+
+In this example you can see:
+
+* providing a default connection
+* defining fields with Python type hints and additional mapping configuration when necessary
+* setting index name
+* defining custom methods
+* overriding the built-in `.save()` method to hook into the persistence life cycle
+* retrieving and saving the object into Elasticsearch
+* accessing the underlying client for other APIs
+
+You can see more in the [persistence](dsl_how_to_guides.md#_persistence_2) chapter.
+
+
+## Pre-built Faceted Search [_pre_built_faceted_search]
+
+If you have your `Document`s defined you can very easily create a faceted search class to simplify searching and filtering.
+
+```python
+from elasticsearch.dsl import FacetedSearch, TermsFacet, DateHistogramFacet
+
+class BlogSearch(FacetedSearch):
+    doc_types = [Article, ]
+    # fields that should be searched
+    fields = ['tags', 'title', 'body']
+
+    facets = {
+        # use bucket aggregations to define facets
+        'tags': TermsFacet(field='tags'),
+        'publishing_frequency': DateHistogramFacet(field='published_from', interval='month')
+    }
+
+# empty search
+bs = BlogSearch()
+response = bs.execute()
+
+for hit in response:
+    print(hit.meta.score, hit.title)
+
+for (tag, count, selected) in response.facets.tags:
+    print(tag, ' (SELECTED):' if selected else ':', count)
+
+for (month, count, selected) in response.facets.publishing_frequency:
+    print(month.strftime('%B %Y'), ' (SELECTED):' if selected else ':', count)
+```
+
+You can find more details in the `faceted_search` chapter.
+
+
+## Update By Query [_update_by_query]
+
+Let’s resume the simple example of articles on a blog, and let’s assume that each article has a number of likes. For this example, imagine we want to increment the number of likes by 1 for all articles that match a certain tag and do not match a certain description. Writing this as a `dict`, we would have the following code:
+
+```python
+from elasticsearch import Elasticsearch
+client = Elasticsearch()
+
+response = client.update_by_query(
+    index="my-index",
+    body={
+      "query": {
+        "bool": {
+          "must": [{"match": {"tag": "python"}}],
+          "must_not": [{"match": {"description": "beta"}}]
+        }
+      },
+      "script"={
+        "source": "ctx._source.likes++",
+        "lang": "painless"
+      }
+    },
+  )
+```
+
+Using the DSL, we can now express this query as such:
+
+```python
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import Search, UpdateByQuery
+from elasticsearch.dsl.query import Match
+
+client = Elasticsearch()
+ubq = UpdateByQuery(using=client, index="my-index") \
+      .query(Match("title", "python"))   \
+      .exclude(Match("description", "beta")) \
+      .script(source="ctx._source.likes++", lang="painless")
+
+response = ubq.execute()
+```
+
+As you can see, the `Update By Query` object provides many of the conveniences offered by the `Search` object, and additionally lets you update the documents matched by the search using a script assigned in the same manner.
+
+
+## ES|QL Queries
+
+The DSL module features an integration with the ES|QL query builder, consisting of two methods available in all `Document` sub-classes: `esql_from()` and `esql_execute()`. Using the `Article` document from above, we can search for up to ten articles that include `"world"` in their titles with the following ES|QL query:
+
+```python
+from elasticsearch.esql import functions
+
+query = Article.esql_from().where(functions.match(Article.title, 'world')).limit(10)
+for a in Article.esql_execute(query):
+    print(a.title)
+```
+
+Review the [ES|QL Query Builder section](esql-query-builder.md) to learn more about building ES|QL queries in Python.
+
+## Migration from the standard client [_migration_from_the_standard_client]
+
+You don’t have to port your entire application to get the benefits of the DSL module; you can start gradually by creating a `Search` object from your existing `dict`, modifying it using the API and serializing it back to a `dict`:
+
+```python
+from elasticsearch.dsl import Search
+from elasticsearch.dsl.query import Term
+
+body = {...} # insert complicated query here
+
+# Convert to Search object
+s = Search.from_dict(body)
+
+# Add some filters, aggregations, queries, ...
+# (Search methods return a copy, so reassign the result)
+s = s.filter(Term("tags", "python"))
+
+# Convert back to dict to plug back into existing code
+body = s.to_dict()
+```
+
+
diff -pruN 8.17.2-2/docs/reference/elasticsearch-dsl.md 9.2.0-1/docs/reference/elasticsearch-dsl.md
--- 8.17.2-2/docs/reference/elasticsearch-dsl.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/elasticsearch-dsl.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,44 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/elasticsearch-dsl.html
+---
+
+# Elasticsearch Python DSL [elasticsearch-dsl]
+
+Elasticsearch DSL is a module of the official Python client that aims to help with writing and running queries against Elasticsearch in a more convenient and idiomatic way. It stays close to the Elasticsearch JSON DSL, mirroring its terminology and structure. It exposes the whole range of the DSL from Python, either directly using defined classes or via queryset-like expressions. Here is an example:
+
+```python
+from elasticsearch.dsl import Search
+from elasticsearch.dsl.query import Match, Term
+
+s = Search(index="my-index") \
+    .query(Match("title", "python")) \
+    .filter(Term("category", "search")) \
+    .exclude(Match("description", "beta"))
+for hit in s:
+    print(hit.title)
+```
+
+Or with asynchronous Python:
+
+```python
+from elasticsearch.dsl import AsyncSearch
+from elasticsearch.dsl.query import Match, Term
+
+async def run_query():
+    s = AsyncSearch(index="my-index") \
+        .query(Match("title", "python")) \
+        .filter(Term("category", "search")) \
+        .exclude(Match("description", "beta"))
+    async for hit in s:
+        print(hit.title)
+```
+
+It also provides an optional wrapper for working with documents as Python objects: defining mappings, retrieving and saving documents, wrapping the document data in user-defined classes.
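+
+A minimal sketch of such a document class (the class and field names are illustrative):
+
+```python
+from elasticsearch.dsl import Document, Keyword, Text, mapped_field
+
+class Post(Document):
+    title: str = mapped_field(Text())
+    category: str = mapped_field(Keyword())
+
+    class Index:
+        name = "posts"
+
+# Post.init() creates the mapping in Elasticsearch,
+# and Post(title="...", category="...").save() persists a document.
+```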
+
+To use the other Elasticsearch APIs (e.g. cluster health), just use the regular client.
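+
+For example, a sketch that reuses the DSL module's default connection (assuming one was registered earlier with `connections.create_connection()`):
+
+```python
+from elasticsearch.dsl import connections
+
+# the same client instance used by the DSL module
+client = connections.get_connection()
+print(client.cluster.health())
+```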
+
+
+
+
+
diff -pruN 8.17.2-2/docs/reference/esql-pandas.md 9.2.0-1/docs/reference/esql-pandas.md
--- 8.17.2-2/docs/reference/esql-pandas.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/esql-pandas.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,422 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/esql-pandas.html
+---
+
+# ES|QL and Pandas [esql-pandas]
+
+The [Elasticsearch Query Language (ES|QL)](docs-content://explore-analyze/query-filter/languages/esql.md) provides a powerful way to filter, transform, and analyze data stored in {{es}}. Designed to be easy to learn and use, it is a perfect fit for data scientists familiar with Pandas and other dataframe-based libraries. ES|QL queries produce tables with named columns, which is precisely the structure of a dataframe.
+
+This page shows you an example of using ES|QL and Pandas together to work with dataframes.
+
+
+## Import data [import-data]
+
+Use the [`employees` sample data](https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv) and [mapping](https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-default.json). The easiest way to load this dataset is to run [two Elasticsearch API requests](https://gist.github.com/pquentin/7cf29a5932cf52b293699dd994b1a276) in the Kibana Console.
+
+::::{dropdown} Index mapping request
+```console
+PUT employees
+{
+  "mappings": {
+    "properties": {
+      "avg_worked_seconds": {
+        "type": "long"
+      },
+      "birth_date": {
+        "type": "date"
+      },
+      "emp_no": {
+        "type": "integer"
+      },
+      "first_name": {
+        "type": "keyword"
+      },
+      "gender": {
+        "type": "keyword"
+      },
+      "height": {
+        "type": "double",
+        "fields": {
+          "float": {
+            "type": "float"
+          },
+          "half_float": {
+            "type": "half_float"
+          },
+          "scaled_float": {
+            "type": "scaled_float",
+            "scaling_factor": 100
+          }
+        }
+      },
+      "hire_date": {
+        "type": "date"
+      },
+      "is_rehired": {
+        "type": "boolean"
+      },
+      "job_positions": {
+        "type": "keyword"
+      },
+      "languages": {
+        "type": "integer",
+        "fields": {
+          "byte": {
+            "type": "byte"
+          },
+          "long": {
+            "type": "long"
+          },
+          "short": {
+            "type": "short"
+          }
+        }
+      },
+      "last_name": {
+        "type": "keyword"
+      },
+      "salary": {
+        "type": "integer"
+      },
+      "salary_change": {
+        "type": "double",
+        "fields": {
+          "int": {
+            "type": "integer"
+          },
+          "keyword": {
+            "type": "keyword"
+          },
+          "long": {
+            "type": "long"
+          }
+        }
+      },
+      "still_hired": {
+        "type": "boolean"
+      }
+    }
+  }
+}
+```
+
+::::
+
+
+::::{dropdown} Bulk request to ingest data
+```console
+PUT employees/_bulk
+{ "index": {}}
+{"birth_date":"1953-09-02T00:00:00Z","emp_no":"10001","first_name":"Georgi","gender":"M","hire_date":"1986-06-26T00:00:00Z","languages":"2","last_name":"Facello","salary":"57305","height":"2.03","still_hired":"true","avg_worked_seconds":"268728049","job_positions":["Senior Python Developer","Accountant"],"is_rehired":["false","true"],"salary_change":"1.19"}
+{ "index": {}}
+{"birth_date":"1964-06-02T00:00:00Z","emp_no":"10002","first_name":"Bezalel","gender":"F","hire_date":"1985-11-21T00:00:00Z","languages":"5","last_name":"Simmel","salary":"56371","height":"2.08","still_hired":"true","avg_worked_seconds":"328922887","job_positions":"Senior Team Lead","is_rehired":["false","false"],"salary_change":["-7.23","11.17"]}
+{ "index": {}}
+{"birth_date":"1959-12-03T00:00:00Z","emp_no":"10003","first_name":"Parto","gender":"M","hire_date":"1986-08-28T00:00:00Z","languages":"4","last_name":"Bamford","salary":"61805","height":"1.83","still_hired":"false","avg_worked_seconds":"200296405","salary_change":["14.68","12.82"]}
+{ "index": {}}
+{"birth_date":"1954-05-01T00:00:00Z","emp_no":"10004","first_name":"Chirstian","gender":"M","hire_date":"1986-12-01T00:00:00Z","languages":"5","last_name":"Koblick","salary":"36174","height":"1.78","still_hired":"true","avg_worked_seconds":"311267831","job_positions":["Reporting Analyst","Tech Lead","Head Human Resources","Support Engineer"],"is_rehired":"true","salary_change":["3.65","-0.35","1.13","13.48"]}
+{ "index": {}}
+{"birth_date":"1955-01-21T00:00:00Z","emp_no":"10005","first_name":"Kyoichi","gender":"M","hire_date":"1989-09-12T00:00:00Z","languages":"1","last_name":"Maliniak","salary":"63528","height":"2.05","still_hired":"true","avg_worked_seconds":"244294991","is_rehired":["false","false","false","true"],"salary_change":["-2.14","13.07"]}
+{ "index": {}}
+{"birth_date":"1953-04-20T00:00:00Z","emp_no":"10006","first_name":"Anneke","gender":"F","hire_date":"1989-06-02T00:00:00Z","languages":"3","last_name":"Preusig","salary":"60335","height":"1.56","still_hired":"false","avg_worked_seconds":"372957040","job_positions":["Tech Lead","Principal Support Engineer","Senior Team Lead"],"salary_change":"-3.90"}
+{ "index": {}}
+{"birth_date":"1957-05-23T00:00:00Z","emp_no":"10007","first_name":"Tzvetan","gender":"F","hire_date":"1989-02-10T00:00:00Z","languages":"4","last_name":"Zielinski","salary":"74572","height":"1.70","still_hired":"true","avg_worked_seconds":"393084805","is_rehired":["true","false","true","false"],"salary_change":["-7.06","1.99","0.57"]}
+{ "index": {}}
+{"birth_date":"1958-02-19T00:00:00Z","emp_no":"10008","first_name":"Saniya","gender":"M","hire_date":"1994-09-15T00:00:00Z","languages":"2","last_name":"Kalloufi","salary":"43906","height":"2.10","still_hired":"true","avg_worked_seconds":"283074758","job_positions":["Senior Python Developer","Junior Developer","Purchase Manager","Internship"],"is_rehired":["true","false"],"salary_change":["12.68","3.54","0.75","-2.92"]}
+{ "index": {}}
+{"birth_date":"1952-04-19T00:00:00Z","emp_no":"10009","first_name":"Sumant","gender":"F","hire_date":"1985-02-18T00:00:00Z","languages":"1","last_name":"Peac","salary":"66174","height":"1.85","still_hired":"false","avg_worked_seconds":"236805489","job_positions":["Senior Python Developer","Internship"]}
+{ "index": {}}
+{"birth_date":"1963-06-01T00:00:00Z","emp_no":"10010","first_name":"Duangkaew","hire_date":"1989-08-24T00:00:00Z","languages":"4","last_name":"Piveteau","salary":"45797","height":"1.70","still_hired":"false","avg_worked_seconds":"315236372","job_positions":["Architect","Reporting Analyst","Tech Lead","Purchase Manager"],"is_rehired":["true","true","false","false"],"salary_change":["5.05","-6.77","4.69","12.15"]}
+{ "index": {}}
+{"birth_date":"1953-11-07T00:00:00Z","emp_no":"10011","first_name":"Mary","hire_date":"1990-01-22T00:00:00Z","languages":"5","last_name":"Sluis","salary":"31120","height":"1.50","still_hired":"true","avg_worked_seconds":"239615525","job_positions":["Architect","Reporting Analyst","Tech Lead","Senior Team Lead"],"is_rehired":["true","true"],"salary_change":["10.35","-7.82","8.73","3.48"]}
+{ "index": {}}
+{"birth_date":"1960-10-04T00:00:00Z","emp_no":"10012","first_name":"Patricio","hire_date":"1992-12-18T00:00:00Z","languages":"5","last_name":"Bridgland","salary":"48942","height":"1.97","still_hired":"false","avg_worked_seconds":"365510850","job_positions":["Head Human Resources","Accountant"],"is_rehired":["false","true","true","false"],"salary_change":"0.04"}
+{ "index": {}}
+{"birth_date":"1963-06-07T00:00:00Z","emp_no":"10013","first_name":"Eberhardt","hire_date":"1985-10-20T00:00:00Z","languages":"1","last_name":"Terkki","salary":"48735","height":"1.94","still_hired":"true","avg_worked_seconds":"253864340","job_positions":"Reporting Analyst","is_rehired":["true","true"]}
+{ "index": {}}
+{"birth_date":"1956-02-12T00:00:00Z","emp_no":"10014","first_name":"Berni","hire_date":"1987-03-11T00:00:00Z","languages":"5","last_name":"Genin","salary":"37137","height":"1.99","still_hired":"false","avg_worked_seconds":"225049139","job_positions":["Reporting Analyst","Data Scientist","Head Human Resources"],"salary_change":["-1.89","9.07"]}
+{ "index": {}}
+{"birth_date":"1959-08-19T00:00:00Z","emp_no":"10015","first_name":"Guoxiang","hire_date":"1987-07-02T00:00:00Z","languages":"5","last_name":"Nooteboom","salary":"25324","height":"1.66","still_hired":"true","avg_worked_seconds":"390266432","job_positions":["Principal Support Engineer","Junior Developer","Head Human Resources","Support Engineer"],"is_rehired":["true","false","false","false"],"salary_change":["14.25","12.40"]}
+{ "index": {}}
+{"birth_date":"1961-05-02T00:00:00Z","emp_no":"10016","first_name":"Kazuhito","hire_date":"1995-01-27T00:00:00Z","languages":"2","last_name":"Cappelletti","salary":"61358","height":"1.54","still_hired":"false","avg_worked_seconds":"253029411","job_positions":["Reporting Analyst","Python Developer","Accountant","Purchase Manager"],"is_rehired":["false","false"],"salary_change":["-5.18","7.69"]}
+{ "index": {}}
+{"birth_date":"1958-07-06T00:00:00Z","emp_no":"10017","first_name":"Cristinel","hire_date":"1993-08-03T00:00:00Z","languages":"2","last_name":"Bouloucos","salary":"58715","height":"1.74","still_hired":"false","avg_worked_seconds":"236703986","job_positions":["Data Scientist","Head Human Resources","Purchase Manager"],"is_rehired":["true","false","true","true"],"salary_change":"-6.33"}
+{ "index": {}}
+{"birth_date":"1954-06-19T00:00:00Z","emp_no":"10018","first_name":"Kazuhide","hire_date":"1987-04-03T00:00:00Z","languages":"2","last_name":"Peha","salary":"56760","height":"1.97","still_hired":"false","avg_worked_seconds":"309604079","job_positions":"Junior Developer","is_rehired":["false","false","true","true"],"salary_change":["-1.64","11.51","-5.32"]}
+{ "index": {}}
+{"birth_date":"1953-01-23T00:00:00Z","emp_no":"10019","first_name":"Lillian","hire_date":"1999-04-30T00:00:00Z","languages":"1","last_name":"Haddadi","salary":"73717","height":"2.06","still_hired":"false","avg_worked_seconds":"342855721","job_positions":"Purchase Manager","is_rehired":["false","false"],"salary_change":["-6.84","8.42","-7.26"]}
+{ "index": {}}
+{"birth_date":"1952-12-24T00:00:00Z","emp_no":"10020","first_name":"Mayuko","gender":"M","hire_date":"1991-01-26T00:00:00Z","last_name":"Warwick","salary":"40031","height":"1.41","still_hired":"false","avg_worked_seconds":"373309605","job_positions":"Tech Lead","is_rehired":["true","true","false"],"salary_change":"-5.81"}
+{ "index": {}}
+{"birth_date":"1960-02-20T00:00:00Z","emp_no":"10021","first_name":"Ramzi","gender":"M","hire_date":"1988-02-10T00:00:00Z","last_name":"Erde","salary":"60408","height":"1.47","still_hired":"false","avg_worked_seconds":"287654610","job_positions":"Support Engineer","is_rehired":"true"}
+{ "index": {}}
+{"birth_date":"1952-07-08T00:00:00Z","emp_no":"10022","first_name":"Shahaf","gender":"M","hire_date":"1995-08-22T00:00:00Z","last_name":"Famili","salary":"48233","height":"1.82","still_hired":"false","avg_worked_seconds":"233521306","job_positions":["Reporting Analyst","Data Scientist","Python Developer","Internship"],"is_rehired":["true","false"],"salary_change":["12.09","2.85"]}
+{ "index": {}}
+{"birth_date":"1953-09-29T00:00:00Z","emp_no":"10023","first_name":"Bojan","gender":"F","hire_date":"1989-12-17T00:00:00Z","last_name":"Montemayor","salary":"47896","height":"1.75","still_hired":"true","avg_worked_seconds":"330870342","job_positions":["Accountant","Support Engineer","Purchase Manager"],"is_rehired":["true","true","false"],"salary_change":["14.63","0.80"]}
+{ "index": {}}
+{"birth_date":"1958-09-05T00:00:00Z","emp_no":"10024","first_name":"Suzette","gender":"F","hire_date":"1997-05-19T00:00:00Z","last_name":"Pettey","salary":"64675","height":"2.08","still_hired":"true","avg_worked_seconds":"367717671","job_positions":"Junior Developer","is_rehired":["true","true","true","true"]}
+{ "index": {}}
+{"birth_date":"1958-10-31T00:00:00Z","emp_no":"10025","first_name":"Prasadram","gender":"M","hire_date":"1987-08-17T00:00:00Z","last_name":"Heyers","salary":"47411","height":"1.87","still_hired":"false","avg_worked_seconds":"371270797","job_positions":"Accountant","is_rehired":["true","false"],"salary_change":["-4.33","-2.90","12.06","-3.46"]}
+{ "index": {}}
+{"birth_date":"1953-04-03T00:00:00Z","emp_no":"10026","first_name":"Yongqiao","gender":"M","hire_date":"1995-03-20T00:00:00Z","last_name":"Berztiss","salary":"28336","height":"2.10","still_hired":"true","avg_worked_seconds":"359208133","job_positions":"Reporting Analyst","is_rehired":["false","true"],"salary_change":["-7.37","10.62","11.20"]}
+{ "index": {}}
+{"birth_date":"1962-07-10T00:00:00Z","emp_no":"10027","first_name":"Divier","gender":"F","hire_date":"1989-07-07T00:00:00Z","last_name":"Reistad","salary":"73851","height":"1.53","still_hired":"false","avg_worked_seconds":"374037782","job_positions":"Senior Python Developer","is_rehired":"false"}
+{ "index": {}}
+{"birth_date":"1963-11-26T00:00:00Z","emp_no":"10028","first_name":"Domenick","gender":"M","hire_date":"1991-10-22T00:00:00Z","last_name":"Tempesti","salary":"39356","height":"2.07","still_hired":"true","avg_worked_seconds":"226435054","job_positions":["Tech Lead","Python Developer","Accountant","Internship"],"is_rehired":["true","false","false","true"]}
+{ "index": {}}
+{"birth_date":"1956-12-13T00:00:00Z","emp_no":"10029","first_name":"Otmar","gender":"M","hire_date":"1985-11-20T00:00:00Z","last_name":"Herbst","salary":"74999","height":"1.99","still_hired":"false","avg_worked_seconds":"257694181","job_positions":["Senior Python Developer","Data Scientist","Principal Support Engineer"],"is_rehired":"true","salary_change":["-0.32","-1.90","-8.19"]}
+{ "index": {}}
+{"birth_date":"1958-07-14T00:00:00Z","emp_no":"10030","gender":"M","hire_date":"1994-02-17T00:00:00Z","languages":"3","last_name":"Demeyer","salary":"67492","height":"1.92","still_hired":"false","avg_worked_seconds":"394597613","job_positions":["Tech Lead","Data Scientist","Senior Team Lead"],"is_rehired":["true","false","false"],"salary_change":"-0.40"}
+{ "index": {}}
+{"birth_date":"1959-01-27T00:00:00Z","emp_no":"10031","gender":"M","hire_date":"1991-09-01T00:00:00Z","languages":"4","last_name":"Joslin","salary":"37716","height":"1.68","still_hired":"false","avg_worked_seconds":"348545109","job_positions":["Architect","Senior Python Developer","Purchase Manager","Senior Team Lead"],"is_rehired":"false"}
+{ "index": {}}
+{"birth_date":"1960-08-09T00:00:00Z","emp_no":"10032","gender":"F","hire_date":"1990-06-20T00:00:00Z","languages":"3","last_name":"Reistad","salary":"62233","height":"2.10","still_hired":"false","avg_worked_seconds":"277622619","job_positions":["Architect","Senior Python Developer","Junior Developer","Purchase Manager"],"is_rehired":["false","false"],"salary_change":["9.32","-4.92"]}
+{ "index": {}}
+{"birth_date":"1956-11-14T00:00:00Z","emp_no":"10033","gender":"M","hire_date":"1987-03-18T00:00:00Z","languages":"1","last_name":"Merlo","salary":"70011","height":"1.63","still_hired":"false","avg_worked_seconds":"208374744","is_rehired":"true"}
+{ "index": {}}
+{"birth_date":"1962-12-29T00:00:00Z","emp_no":"10034","gender":"M","hire_date":"1988-09-21T00:00:00Z","languages":"1","last_name":"Swan","salary":"39878","height":"1.46","still_hired":"false","avg_worked_seconds":"214393176","job_positions":["Business Analyst","Data Scientist","Python Developer","Accountant"],"is_rehired":"false","salary_change":"-8.46"}
+{ "index": {}}
+{"birth_date":"1953-02-08T00:00:00Z","emp_no":"10035","gender":"M","hire_date":"1988-09-05T00:00:00Z","languages":"5","last_name":"Chappelet","salary":"25945","height":"1.81","still_hired":"false","avg_worked_seconds":"203838153","job_positions":["Senior Python Developer","Data Scientist"],"is_rehired":"false","salary_change":["-2.54","-6.58"]}
+{ "index": {}}
+{"birth_date":"1959-08-10T00:00:00Z","emp_no":"10036","gender":"M","hire_date":"1992-01-03T00:00:00Z","languages":"4","last_name":"Portugali","salary":"60781","height":"1.61","still_hired":"false","avg_worked_seconds":"305493131","job_positions":"Senior Python Developer","is_rehired":["true","false","false"]}
+{ "index": {}}
+{"birth_date":"1963-07-22T00:00:00Z","emp_no":"10037","gender":"M","hire_date":"1990-12-05T00:00:00Z","languages":"2","last_name":"Makrucki","salary":"37691","height":"2.00","still_hired":"true","avg_worked_seconds":"359217000","job_positions":["Senior Python Developer","Tech Lead","Accountant"],"is_rehired":"false","salary_change":"-7.08"}
+{ "index": {}}
+{"birth_date":"1960-07-20T00:00:00Z","emp_no":"10038","gender":"M","hire_date":"1989-09-20T00:00:00Z","languages":"4","last_name":"Lortz","salary":"35222","height":"1.53","still_hired":"true","avg_worked_seconds":"314036411","job_positions":["Senior Python Developer","Python Developer","Support Engineer"]}
+{ "index": {}}
+{"birth_date":"1959-10-01T00:00:00Z","emp_no":"10039","gender":"M","hire_date":"1988-01-19T00:00:00Z","languages":"2","last_name":"Brender","salary":"36051","height":"1.55","still_hired":"false","avg_worked_seconds":"243221262","job_positions":["Business Analyst","Python Developer","Principal Support Engineer"],"is_rehired":["true","true"],"salary_change":"-6.90"}
+{ "index": {}}
+{"emp_no":"10040","first_name":"Weiyi","gender":"F","hire_date":"1993-02-14T00:00:00Z","languages":"4","last_name":"Meriste","salary":"37112","height":"1.90","still_hired":"false","avg_worked_seconds":"244478622","job_positions":"Principal Support Engineer","is_rehired":["true","false","true","true"],"salary_change":["6.97","14.74","-8.94","1.92"]}
+{ "index": {}}
+{"emp_no":"10041","first_name":"Uri","gender":"F","hire_date":"1989-11-12T00:00:00Z","languages":"1","last_name":"Lenart","salary":"56415","height":"1.75","still_hired":"false","avg_worked_seconds":"287789442","job_positions":["Data Scientist","Head Human Resources","Internship","Senior Team Lead"],"salary_change":["9.21","0.05","7.29","-2.94"]}
+{ "index": {}}
+{"emp_no":"10042","first_name":"Magy","gender":"F","hire_date":"1993-03-21T00:00:00Z","languages":"3","last_name":"Stamatiou","salary":"30404","height":"1.44","still_hired":"true","avg_worked_seconds":"246355863","job_positions":["Architect","Business Analyst","Junior Developer","Internship"],"salary_change":["-9.28","9.42"]}
+{ "index": {}}
+{"emp_no":"10043","first_name":"Yishay","gender":"M","hire_date":"1990-10-20T00:00:00Z","languages":"1","last_name":"Tzvieli","salary":"34341","height":"1.52","still_hired":"true","avg_worked_seconds":"287222180","job_positions":["Data Scientist","Python Developer","Support Engineer"],"is_rehired":["false","true","true"],"salary_change":["-5.17","4.62","7.42"]}
+{ "index": {}}
+{"emp_no":"10044","first_name":"Mingsen","gender":"F","hire_date":"1994-05-21T00:00:00Z","languages":"1","last_name":"Casley","salary":"39728","height":"2.06","still_hired":"false","avg_worked_seconds":"387408356","job_positions":["Tech Lead","Principal Support Engineer","Accountant","Support Engineer"],"is_rehired":["true","true"],"salary_change":"8.09"}
+{ "index": {}}
+{"emp_no":"10045","first_name":"Moss","gender":"M","hire_date":"1989-09-02T00:00:00Z","languages":"3","last_name":"Shanbhogue","salary":"74970","height":"1.70","still_hired":"false","avg_worked_seconds":"371418933","job_positions":["Principal Support Engineer","Junior Developer","Accountant","Purchase Manager"],"is_rehired":["true","false"]}
+{ "index": {}}
+{"emp_no":"10046","first_name":"Lucien","gender":"M","hire_date":"1992-06-20T00:00:00Z","languages":"4","last_name":"Rosenbaum","salary":"50064","height":"1.52","still_hired":"true","avg_worked_seconds":"302353405","job_positions":["Principal Support Engineer","Junior Developer","Head Human Resources","Internship"],"is_rehired":["true","true","false","true"],"salary_change":"2.39"}
+{ "index": {}}
+{"emp_no":"10047","first_name":"Zvonko","gender":"M","hire_date":"1989-03-31T00:00:00Z","languages":"4","last_name":"Nyanchama","salary":"42716","height":"1.52","still_hired":"true","avg_worked_seconds":"306369346","job_positions":["Architect","Data Scientist","Principal Support Engineer","Senior Team Lead"],"is_rehired":"true","salary_change":["-6.36","12.12"]}
+{ "index": {}}
+{"emp_no":"10048","first_name":"Florian","gender":"M","hire_date":"1985-02-24T00:00:00Z","languages":"3","last_name":"Syrotiuk","salary":"26436","height":"2.00","still_hired":"false","avg_worked_seconds":"248451647","job_positions":"Internship","is_rehired":["true","true"]}
+{ "index": {}}
+{"emp_no":"10049","first_name":"Basil","gender":"F","hire_date":"1992-05-04T00:00:00Z","languages":"5","last_name":"Tramer","salary":"37853","height":"1.52","still_hired":"true","avg_worked_seconds":"320725709","job_positions":["Senior Python Developer","Business Analyst"],"salary_change":"-1.05"}
+{ "index": {}}
+{"birth_date":"1958-05-21T00:00:00Z","emp_no":"10050","first_name":"Yinghua","gender":"M","hire_date":"1990-12-25T00:00:00Z","languages":"2","last_name":"Dredge","salary":"43026","height":"1.96","still_hired":"true","avg_worked_seconds":"242731798","job_positions":["Reporting Analyst","Junior Developer","Accountant","Support Engineer"],"is_rehired":"true","salary_change":["8.70","10.94"]}
+{ "index": {}}
+{"birth_date":"1953-07-28T00:00:00Z","emp_no":"10051","first_name":"Hidefumi","gender":"M","hire_date":"1992-10-15T00:00:00Z","languages":"3","last_name":"Caine","salary":"58121","height":"1.89","still_hired":"true","avg_worked_seconds":"374753122","job_positions":["Business Analyst","Accountant","Purchase Manager"]}
+{ "index": {}}
+{"birth_date":"1961-02-26T00:00:00Z","emp_no":"10052","first_name":"Heping","gender":"M","hire_date":"1988-05-21T00:00:00Z","languages":"1","last_name":"Nitsch","salary":"55360","height":"1.79","still_hired":"true","avg_worked_seconds":"299654717","is_rehired":["true","true","false"],"salary_change":["-0.55","-1.89","-4.22","-6.03"]}
+{ "index": {}}
+{"birth_date":"1954-09-13T00:00:00Z","emp_no":"10053","first_name":"Sanjiv","gender":"F","hire_date":"1986-02-04T00:00:00Z","languages":"3","last_name":"Zschoche","salary":"54462","height":"1.58","still_hired":"false","avg_worked_seconds":"368103911","job_positions":"Support Engineer","is_rehired":["true","false","true","false"],"salary_change":["-7.67","-3.25"]}
+{ "index": {}}
+{"birth_date":"1957-04-04T00:00:00Z","emp_no":"10054","first_name":"Mayumi","gender":"M","hire_date":"1995-03-13T00:00:00Z","languages":"4","last_name":"Schueller","salary":"65367","height":"1.82","still_hired":"false","avg_worked_seconds":"297441693","job_positions":"Principal Support Engineer","is_rehired":["false","false"]}
+{ "index": {}}
+{"birth_date":"1956-06-06T00:00:00Z","emp_no":"10055","first_name":"Georgy","gender":"M","hire_date":"1992-04-27T00:00:00Z","languages":"5","last_name":"Dredge","salary":"49281","height":"2.04","still_hired":"false","avg_worked_seconds":"283157844","job_positions":["Senior Python Developer","Head Human Resources","Internship","Support Engineer"],"is_rehired":["false","false","true"],"salary_change":["7.34","12.99","3.17"]}
+{ "index": {}}
+{"birth_date":"1961-09-01T00:00:00Z","emp_no":"10056","first_name":"Brendon","gender":"F","hire_date":"1990-02-01T00:00:00Z","languages":"2","last_name":"Bernini","salary":"33370","height":"1.57","still_hired":"true","avg_worked_seconds":"349086555","job_positions":"Senior Team Lead","is_rehired":["true","false","false"],"salary_change":["10.99","-5.17"]}
+{ "index": {}}
+{"birth_date":"1954-05-30T00:00:00Z","emp_no":"10057","first_name":"Ebbe","gender":"F","hire_date":"1992-01-15T00:00:00Z","languages":"4","last_name":"Callaway","salary":"27215","height":"1.59","still_hired":"true","avg_worked_seconds":"324356269","job_positions":["Python Developer","Head Human Resources"],"salary_change":["-6.73","-2.43","-5.27","1.03"]}
+{ "index": {}}
+{"birth_date":"1954-10-01T00:00:00Z","emp_no":"10058","first_name":"Berhard","gender":"M","hire_date":"1987-04-13T00:00:00Z","languages":"3","last_name":"McFarlin","salary":"38376","height":"1.83","still_hired":"false","avg_worked_seconds":"268378108","job_positions":"Principal Support Engineer","salary_change":"-4.89"}
+{ "index": {}}
+{"birth_date":"1953-09-19T00:00:00Z","emp_no":"10059","first_name":"Alejandro","gender":"F","hire_date":"1991-06-26T00:00:00Z","languages":"2","last_name":"McAlpine","salary":"44307","height":"1.48","still_hired":"false","avg_worked_seconds":"237368465","job_positions":["Architect","Principal Support Engineer","Purchase Manager","Senior Team Lead"],"is_rehired":"false","salary_change":["5.53","13.38","-4.69","6.27"]}
+{ "index": {}}
+{"birth_date":"1961-10-15T00:00:00Z","emp_no":"10060","first_name":"Breannda","gender":"M","hire_date":"1987-11-02T00:00:00Z","languages":"2","last_name":"Billingsley","salary":"29175","height":"1.42","still_hired":"true","avg_worked_seconds":"341158890","job_positions":["Business Analyst","Data Scientist","Senior Team Lead"],"is_rehired":["false","false","true","false"],"salary_change":["-1.76","-0.85"]}
+{ "index": {}}
+{"birth_date":"1962-10-19T00:00:00Z","emp_no":"10061","first_name":"Tse","gender":"M","hire_date":"1985-09-17T00:00:00Z","languages":"1","last_name":"Herber","salary":"49095","height":"1.45","still_hired":"false","avg_worked_seconds":"327550310","job_positions":["Purchase Manager","Senior Team Lead"],"is_rehired":["false","true"],"salary_change":["14.39","-2.58","-0.95"]}
+{ "index": {}}
+{"birth_date":"1961-11-02T00:00:00Z","emp_no":"10062","first_name":"Anoosh","gender":"M","hire_date":"1991-08-30T00:00:00Z","languages":"3","last_name":"Peyn","salary":"65030","height":"1.70","still_hired":"false","avg_worked_seconds":"203989706","job_positions":["Python Developer","Senior Team Lead"],"is_rehired":["false","true","true"],"salary_change":"-1.17"}
+{ "index": {}}
+{"birth_date":"1952-08-06T00:00:00Z","emp_no":"10063","first_name":"Gino","gender":"F","hire_date":"1989-04-08T00:00:00Z","languages":"3","last_name":"Leonhardt","salary":"52121","height":"1.78","still_hired":"true","avg_worked_seconds":"214068302","is_rehired":"true"}
+{ "index": {}}
+{"birth_date":"1959-04-07T00:00:00Z","emp_no":"10064","first_name":"Udi","gender":"M","hire_date":"1985-11-20T00:00:00Z","languages":"5","last_name":"Jansch","salary":"33956","height":"1.93","still_hired":"false","avg_worked_seconds":"307364077","job_positions":"Purchase Manager","is_rehired":["false","false","true","false"],"salary_change":["-8.66","-2.52"]}
+{ "index": {}}
+{"birth_date":"1963-04-14T00:00:00Z","emp_no":"10065","first_name":"Satosi","gender":"M","hire_date":"1988-05-18T00:00:00Z","languages":"2","last_name":"Awdeh","salary":"50249","height":"1.59","still_hired":"false","avg_worked_seconds":"372660279","job_positions":["Business Analyst","Data Scientist","Principal Support Engineer"],"is_rehired":["false","true"],"salary_change":["-1.47","14.44","-9.81"]}
+{ "index": {}}
+{"birth_date":"1952-11-13T00:00:00Z","emp_no":"10066","first_name":"Kwee","gender":"M","hire_date":"1986-02-26T00:00:00Z","languages":"5","last_name":"Schusler","salary":"31897","height":"2.10","still_hired":"true","avg_worked_seconds":"360906451","job_positions":["Senior Python Developer","Data Scientist","Accountant","Internship"],"is_rehired":["true","true","true"],"salary_change":"5.94"}
+{ "index": {}}
+{"birth_date":"1953-01-07T00:00:00Z","emp_no":"10067","first_name":"Claudi","gender":"M","hire_date":"1987-03-04T00:00:00Z","languages":"2","last_name":"Stavenow","salary":"52044","height":"1.77","still_hired":"true","avg_worked_seconds":"347664141","job_positions":["Tech Lead","Principal Support Engineer"],"is_rehired":["false","false"],"salary_change":["8.72","4.44"]}
+{ "index": {}}
+{"birth_date":"1962-11-26T00:00:00Z","emp_no":"10068","first_name":"Charlene","gender":"M","hire_date":"1987-08-07T00:00:00Z","languages":"3","last_name":"Brattka","salary":"28941","height":"1.58","still_hired":"true","avg_worked_seconds":"233999584","job_positions":"Architect","is_rehired":"true","salary_change":["3.43","-5.61","-5.29"]}
+{ "index": {}}
+{"birth_date":"1960-09-06T00:00:00Z","emp_no":"10069","first_name":"Margareta","gender":"F","hire_date":"1989-11-05T00:00:00Z","languages":"5","last_name":"Bierman","salary":"41933","height":"1.77","still_hired":"true","avg_worked_seconds":"366512352","job_positions":["Business Analyst","Junior Developer","Purchase Manager","Support Engineer"],"is_rehired":"false","salary_change":["-3.34","-6.33","6.23","-0.31"]}
+{ "index": {}}
+{"birth_date":"1955-08-20T00:00:00Z","emp_no":"10070","first_name":"Reuven","gender":"M","hire_date":"1985-10-14T00:00:00Z","languages":"3","last_name":"Garigliano","salary":"54329","height":"1.77","still_hired":"true","avg_worked_seconds":"347188604","is_rehired":["true","true","true"],"salary_change":"-5.90"}
+{ "index": {}}
+{"birth_date":"1958-01-21T00:00:00Z","emp_no":"10071","first_name":"Hisao","gender":"M","hire_date":"1987-10-01T00:00:00Z","languages":"2","last_name":"Lipner","salary":"40612","height":"2.07","still_hired":"false","avg_worked_seconds":"306671693","job_positions":["Business Analyst","Reporting Analyst","Senior Team Lead"],"is_rehired":["false","false","false"],"salary_change":"-2.69"}
+{ "index": {}}
+{"birth_date":"1952-05-15T00:00:00Z","emp_no":"10072","first_name":"Hironoby","gender":"F","hire_date":"1988-07-21T00:00:00Z","languages":"5","last_name":"Sidou","salary":"54518","height":"1.82","still_hired":"true","avg_worked_seconds":"209506065","job_positions":["Architect","Tech Lead","Python Developer","Senior Team Lead"],"is_rehired":["false","false","true","false"],"salary_change":["11.21","-2.30","2.22","-5.44"]}
+{ "index": {}}
+{"birth_date":"1954-02-23T00:00:00Z","emp_no":"10073","first_name":"Shir","gender":"M","hire_date":"1991-12-01T00:00:00Z","languages":"4","last_name":"McClurg","salary":"32568","height":"1.66","still_hired":"false","avg_worked_seconds":"314930367","job_positions":["Principal Support Engineer","Python Developer","Junior Developer","Purchase Manager"],"is_rehired":["true","false"],"salary_change":"-5.67"}
+{ "index": {}}
+{"birth_date":"1955-08-28T00:00:00Z","emp_no":"10074","first_name":"Mokhtar","gender":"F","hire_date":"1990-08-13T00:00:00Z","languages":"5","last_name":"Bernatsky","salary":"38992","height":"1.64","still_hired":"true","avg_worked_seconds":"382397583","job_positions":["Senior Python Developer","Python Developer"],"is_rehired":["true","false","false","true"],"salary_change":["6.70","1.98","-5.64","2.96"]}
+{ "index": {}}
+{"birth_date":"1960-03-09T00:00:00Z","emp_no":"10075","first_name":"Gao","gender":"F","hire_date":"1987-03-19T00:00:00Z","languages":"5","last_name":"Dolinsky","salary":"51956","height":"1.94","still_hired":"false","avg_worked_seconds":"370238919","job_positions":"Purchase Manager","is_rehired":"true","salary_change":["9.63","-3.29","8.42"]}
+{ "index": {}}
+{"birth_date":"1952-06-13T00:00:00Z","emp_no":"10076","first_name":"Erez","gender":"F","hire_date":"1985-07-09T00:00:00Z","languages":"3","last_name":"Ritzmann","salary":"62405","height":"1.83","still_hired":"false","avg_worked_seconds":"376240317","job_positions":["Architect","Senior Python Developer"],"is_rehired":"false","salary_change":["-6.90","-1.30","8.75"]}
+{ "index": {}}
+{"birth_date":"1964-04-18T00:00:00Z","emp_no":"10077","first_name":"Mona","gender":"M","hire_date":"1990-03-02T00:00:00Z","languages":"5","last_name":"Azuma","salary":"46595","height":"1.68","still_hired":"false","avg_worked_seconds":"351960222","job_positions":"Internship","salary_change":"-0.01"}
+{ "index": {}}
+{"birth_date":"1959-12-25T00:00:00Z","emp_no":"10078","first_name":"Danel","gender":"F","hire_date":"1987-05-26T00:00:00Z","languages":"2","last_name":"Mondadori","salary":"69904","height":"1.81","still_hired":"true","avg_worked_seconds":"377116038","job_positions":["Architect","Principal Support Engineer","Internship"],"is_rehired":"true","salary_change":["-7.88","9.98","12.52"]}
+{ "index": {}}
+{"birth_date":"1961-10-05T00:00:00Z","emp_no":"10079","first_name":"Kshitij","gender":"F","hire_date":"1986-03-27T00:00:00Z","languages":"2","last_name":"Gils","salary":"32263","height":"1.59","still_hired":"false","avg_worked_seconds":"320953330","is_rehired":"false","salary_change":"7.58"}
+{ "index": {}}
+{"birth_date":"1957-12-03T00:00:00Z","emp_no":"10080","first_name":"Premal","gender":"M","hire_date":"1985-11-19T00:00:00Z","languages":"5","last_name":"Baek","salary":"52833","height":"1.80","still_hired":"false","avg_worked_seconds":"239266137","job_positions":"Senior Python Developer","salary_change":["-4.35","7.36","5.56"]}
+{ "index": {}}
+{"birth_date":"1960-12-17T00:00:00Z","emp_no":"10081","first_name":"Zhongwei","gender":"M","hire_date":"1986-10-30T00:00:00Z","languages":"2","last_name":"Rosen","salary":"50128","height":"1.44","still_hired":"true","avg_worked_seconds":"321375511","job_positions":["Accountant","Internship"],"is_rehired":["false","false","false"]}
+{ "index": {}}
+{"birth_date":"1963-09-09T00:00:00Z","emp_no":"10082","first_name":"Parviz","gender":"M","hire_date":"1990-01-03T00:00:00Z","languages":"4","last_name":"Lortz","salary":"49818","height":"1.61","still_hired":"false","avg_worked_seconds":"232522994","job_positions":"Principal Support Engineer","is_rehired":"false","salary_change":["1.19","-3.39"]}
+{ "index": {}}
+{"birth_date":"1959-07-23T00:00:00Z","emp_no":"10083","first_name":"Vishv","gender":"M","hire_date":"1987-03-31T00:00:00Z","languages":"1","last_name":"Zockler","salary":"39110","height":"1.42","still_hired":"false","avg_worked_seconds":"331236443","job_positions":"Head Human Resources"}
+{ "index": {}}
+{"birth_date":"1960-05-25T00:00:00Z","emp_no":"10084","first_name":"Tuval","gender":"M","hire_date":"1995-12-15T00:00:00Z","languages":"1","last_name":"Kalloufi","salary":"28035","height":"1.51","still_hired":"true","avg_worked_seconds":"359067056","job_positions":"Principal Support Engineer","is_rehired":"false"}
+{ "index": {}}
+{"birth_date":"1962-11-07T00:00:00Z","emp_no":"10085","first_name":"Kenroku","gender":"M","hire_date":"1994-04-09T00:00:00Z","languages":"5","last_name":"Malabarba","salary":"35742","height":"2.01","still_hired":"true","avg_worked_seconds":"353404008","job_positions":["Senior Python Developer","Business Analyst","Tech Lead","Accountant"],"salary_change":["11.67","6.75","8.40"]}
+{ "index": {}}
+{"birth_date":"1962-11-19T00:00:00Z","emp_no":"10086","first_name":"Somnath","gender":"M","hire_date":"1990-02-16T00:00:00Z","languages":"1","last_name":"Foote","salary":"68547","height":"1.74","still_hired":"true","avg_worked_seconds":"328580163","job_positions":"Senior Python Developer","is_rehired":["false","true"],"salary_change":"13.61"}
+{ "index": {}}
+{"birth_date":"1959-07-23T00:00:00Z","emp_no":"10087","first_name":"Xinglin","gender":"F","hire_date":"1986-09-08T00:00:00Z","languages":"5","last_name":"Eugenio","salary":"32272","height":"1.74","still_hired":"true","avg_worked_seconds":"305782871","job_positions":["Junior Developer","Internship"],"is_rehired":["false","false"],"salary_change":"-2.05"}
+{ "index": {}}
+{"birth_date":"1954-02-25T00:00:00Z","emp_no":"10088","first_name":"Jungsoon","gender":"F","hire_date":"1988-09-02T00:00:00Z","languages":"5","last_name":"Syrzycki","salary":"39638","height":"1.91","still_hired":"false","avg_worked_seconds":"330714423","job_positions":["Reporting Analyst","Business Analyst","Tech Lead"],"is_rehired":"true"}
+{ "index": {}}
+{"birth_date":"1963-03-21T00:00:00Z","emp_no":"10089","first_name":"Sudharsan","gender":"F","hire_date":"1986-08-12T00:00:00Z","languages":"4","last_name":"Flasterstein","salary":"43602","height":"1.57","still_hired":"true","avg_worked_seconds":"232951673","job_positions":["Junior Developer","Accountant"],"is_rehired":["true","false","false","false"]}
+{ "index": {}}
+{"birth_date":"1961-05-30T00:00:00Z","emp_no":"10090","first_name":"Kendra","gender":"M","hire_date":"1986-03-14T00:00:00Z","languages":"2","last_name":"Hofting","salary":"44956","height":"2.03","still_hired":"true","avg_worked_seconds":"212460105","is_rehired":["false","false","false","true"],"salary_change":["7.15","-1.85","3.60"]}
+{ "index": {}}
+{"birth_date":"1955-10-04T00:00:00Z","emp_no":"10091","first_name":"Amabile","gender":"M","hire_date":"1992-11-18T00:00:00Z","languages":"3","last_name":"Gomatam","salary":"38645","height":"2.09","still_hired":"true","avg_worked_seconds":"242582807","job_positions":["Reporting Analyst","Python Developer"],"is_rehired":["true","true","false","false"],"salary_change":["-9.23","7.50","5.85","5.19"]}
+{ "index": {}}
+{"birth_date":"1964-10-18T00:00:00Z","emp_no":"10092","first_name":"Valdiodio","gender":"F","hire_date":"1989-09-22T00:00:00Z","languages":"1","last_name":"Niizuma","salary":"25976","height":"1.75","still_hired":"false","avg_worked_seconds":"313407352","job_positions":["Junior Developer","Accountant"],"is_rehired":["false","false","true","true"],"salary_change":["8.78","0.39","-6.77","8.30"]}
+{ "index": {}}
+{"birth_date":"1964-06-11T00:00:00Z","emp_no":"10093","first_name":"Sailaja","gender":"M","hire_date":"1996-11-05T00:00:00Z","languages":"3","last_name":"Desikan","salary":"45656","height":"1.69","still_hired":"false","avg_worked_seconds":"315904921","job_positions":["Reporting Analyst","Tech Lead","Principal Support Engineer","Purchase Manager"],"salary_change":"-0.88"}
+{ "index": {}}
+{"birth_date":"1957-05-25T00:00:00Z","emp_no":"10094","first_name":"Arumugam","gender":"F","hire_date":"1987-04-18T00:00:00Z","languages":"5","last_name":"Ossenbruggen","salary":"66817","height":"2.10","still_hired":"false","avg_worked_seconds":"332920135","job_positions":["Senior Python Developer","Principal Support Engineer","Accountant"],"is_rehired":["true","false","true"],"salary_change":["2.22","7.92"]}
+{ "index": {}}
+{"birth_date":"1965-01-03T00:00:00Z","emp_no":"10095","first_name":"Hilari","gender":"M","hire_date":"1986-07-15T00:00:00Z","languages":"4","last_name":"Morton","salary":"37702","height":"1.55","still_hired":"false","avg_worked_seconds":"321850475","is_rehired":["true","true","false","false"],"salary_change":["-3.93","-6.66"]}
+{ "index": {}}
+{"birth_date":"1954-09-16T00:00:00Z","emp_no":"10096","first_name":"Jayson","gender":"M","hire_date":"1990-01-14T00:00:00Z","languages":"4","last_name":"Mandell","salary":"43889","height":"1.94","still_hired":"false","avg_worked_seconds":"204381503","job_positions":["Architect","Reporting Analyst"],"is_rehired":["false","false","false"]}
+{ "index": {}}
+{"birth_date":"1952-02-27T00:00:00Z","emp_no":"10097","first_name":"Remzi","gender":"M","hire_date":"1990-09-15T00:00:00Z","languages":"3","last_name":"Waschkowski","salary":"71165","height":"1.53","still_hired":"false","avg_worked_seconds":"206258084","job_positions":["Reporting Analyst","Tech Lead"],"is_rehired":["true","false"],"salary_change":"-1.12"}
+{ "index": {}}
+{"birth_date":"1961-09-23T00:00:00Z","emp_no":"10098","first_name":"Sreekrishna","gender":"F","hire_date":"1985-05-13T00:00:00Z","languages":"4","last_name":"Servieres","salary":"44817","height":"2.00","still_hired":"false","avg_worked_seconds":"272392146","job_positions":["Architect","Internship","Senior Team Lead"],"is_rehired":"false","salary_change":["-2.83","8.31","4.38"]}
+{ "index": {}}
+{"birth_date":"1956-05-25T00:00:00Z","emp_no":"10099","first_name":"Valter","gender":"F","hire_date":"1988-10-18T00:00:00Z","languages":"2","last_name":"Sullins","salary":"73578","height":"1.81","still_hired":"true","avg_worked_seconds":"377713748","is_rehired":["true","true"],"salary_change":["10.71","14.26","-8.78","-3.98"]}
+{ "index": {}}
+{"birth_date":"1953-04-21T00:00:00Z","emp_no":"10100","first_name":"Hironobu","gender":"F","hire_date":"1987-09-21T00:00:00Z","languages":"4","last_name":"Haraldson","salary":"68431","height":"1.77","still_hired":"true","avg_worked_seconds":"223910853","job_positions":"Purchase Manager","is_rehired":["false","true","true","false"],"salary_change":["13.97","-7.49"]}
+```
+
+::::
+
+
+
+## Convert the dataset [convert-dataset-pandas-dataframe]
+
+Use the CSV output format of ES|QL to convert the `employees` dataset to a Pandas dataframe object.
+
+```python
+from io import StringIO
+from elasticsearch import Elasticsearch
+import pandas as pd
+client = Elasticsearch(
+    "https://[host].elastic-cloud.com",
+    api_key="...",
+)
+response = client.esql.query(
+    query="FROM employees | LIMIT 500",
+    format="csv",
+)
+df = pd.read_csv(StringIO(response.body))
+print(df)
+```
+
+Even though the dataset contains only 100 records, a LIMIT of 500 is specified to suppress ES|QL warnings about potentially missing records. This prints the following dataframe:
+
+```python
+    avg_worked_seconds  ...  salary_change.long still_hired
+0            268728049  ...                   1        True
+1            328922887  ...            [-7, 11]        True
+2            200296405  ...            [12, 14]       False
+3            311267831  ...       [0, 1, 3, 13]        True
+4            244294991  ...            [-2, 13]        True
+..                 ...  ...                 ...         ...
+95           204381503  ...                 NaN       False
+96           206258084  ...                  -1       False
+97           272392146  ...          [-2, 4, 8]       False
+98           377713748  ...    [-8, -3, 10, 14]        True
+99           223910853  ...            [-7, 13]        True
+```
+
+You can now analyze the data with Pandas, or you can continue transforming it using ES|QL.
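+
+For instance, once the dataframe is loaded you can aggregate it with regular Pandas operations (a sketch; the column names come from the `employees` mapping above):
+
+```python
+# average salary for each number of spoken languages, computed client-side
+print(df.groupby("languages")["salary"].mean())
+```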
+
+
+## Analyze the data with Pandas [analyze-data]
+
+In the next example, the [STATS … BY](elasticsearch://reference/query-languages/esql/commands/processing-commands.md#esql-stats-by) command is used to count how many employees speak a given language. The results are sorted by the `languages` column using [SORT](elasticsearch://reference/query-languages/esql/commands/processing-commands.md#esql-sort):
+
+```python
+response = client.esql.query(
+    query="""
+    FROM employees
+    | STATS count = COUNT(emp_no) BY languages
+    | SORT languages
+    | LIMIT 500
+    """,
+    format="csv",
+)
+df = pd.read_csv(
+    StringIO(response.body),
+    dtype={"count": "Int64", "languages": "Int64"},
+)
+print(df)
+```
+
+Note that the `dtype` parameter of `pd.read_csv()` is useful when the types inferred by Pandas are not the ones you need. The code prints the following response:
+
+```python
+   count  languages
+0     15          1
+1     19          2
+2     17          3
+3     18          4
+4     21          5
+```
+
+
+## Pass parameters to a query with ES|QL [passing-params]
+
+Use the [built-in parameters support of the ES|QL REST API](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-params) to pass parameters to a query:
+
+```python
+response = client.esql.query(
+    query="""
+    FROM employees
+    | STATS count = COUNT(emp_no) BY languages
+    | WHERE languages >= (?)
+    | SORT languages
+    | LIMIT 500
+    """,
+    format="csv",
+    params=[3],
+)
+df = pd.read_csv(
+    StringIO(response.body),
+    dtype={"count": "Int64", "languages": "Int64"},
+)
+print(df)
+```
+
+The code above outputs the following:
+
+```python
+   count  languages
+0     17          3
+1     18          4
+2     21          5
+```
+
+If you want to learn more about ES|QL, refer to the [ES|QL documentation](docs-content://explore-analyze/query-filter/languages/esql.md). You can also check out this other [Python example using Boston Celtics data](https://github.com/elastic/elasticsearch-labs/blob/main/supporting-blog-content/Boston-Celtics-Demo/celtics-esql-demo.ipynb).
+
diff -pruN 8.17.2-2/docs/reference/esql-query-builder.md 9.2.0-1/docs/reference/esql-query-builder.md
--- 8.17.2-2/docs/reference/esql-query-builder.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/esql-query-builder.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,260 @@
+# ES|QL Query Builder
+
+::::{warning}
+This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.
+::::
+
+The ES|QL Query Builder allows you to construct ES|QL queries using Python syntax. Consider the following example:
+
+```python
+>>> from elasticsearch.esql import ESQL
+>>> query = (
+    ESQL.from_("employees")
+    .sort("emp_no")
+    .keep("first_name", "last_name", "height")
+    .eval(height_feet="height * 3.281", height_cm="height * 100")
+    .limit(3)
+)
+```
+
+You can then see the assembled ES|QL query by printing the resulting query object:
+
+```python
+>>> print(query)
+FROM employees
+| SORT emp_no
+| KEEP first_name, last_name, height
+| EVAL height_feet = height * 3.281, height_cm = height * 100
+| LIMIT 3
+```
+
+To execute this query, you can pass it to the `client.esql.query()` endpoint:
+
+```python
+>>> import os
+>>> from elasticsearch import Elasticsearch
+>>> client = Elasticsearch(hosts=[os.environ['ELASTICSEARCH_URL']])
+>>> response = client.esql.query(query=query)
+```
+
+The response body contains a `columns` attribute with the list of columns included in the results, and a `values` attribute with the list of results for the query, each given as a list of column values. Here is a possible response body returned by the example query given above:
+
+```python
+>>> from pprint import pprint
+>>> pprint(response.body)
+{'columns': [{'name': 'first_name', 'type': 'text'},
+             {'name': 'last_name', 'type': 'text'},
+             {'name': 'height', 'type': 'double'},
+             {'name': 'height_feet', 'type': 'double'},
+             {'name': 'height_cm', 'type': 'double'}],
+ 'is_partial': False,
+ 'took': 11,
+ 'values': [['Adrian', 'Wells', 2.424, 7.953144, 242.4],
+            ['Aaron', 'Gonzalez', 1.584, 5.1971, 158.4],
+            ['Miranda', 'Kramer', 1.55, 5.08555, 155]]}
+```
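+
+If you prefer to work with the results as a dataframe, the `columns` and `values` attributes map directly onto one. A sketch, assuming Pandas is installed:
+
+```python
+import pandas as pd
+
+df = pd.DataFrame(
+    response.body["values"],
+    columns=[col["name"] for col in response.body["columns"]],
+)
+```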
+
+## Creating an ES|QL query
+
+To construct an ES|QL query you start from one of the ES|QL source commands:
+
+### `ESQL.from_`
+
+The `FROM` command selects the indices, data streams or aliases to be queried.
+
+Examples:
+
+```python
+from elasticsearch.esql import ESQL
+
+# FROM employees
+query1 = ESQL.from_("employees")
+
+# FROM <logs-{now/d}>
+query2 = ESQL.from_("<logs-{now/d}>")
+
+# FROM employees-00001, other-employees-*
+query3 = ESQL.from_("employees-00001", "other-employees-*")
+
+# FROM cluster_one:employees-00001, cluster_two:other-employees-*
+query4 = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*")
+
+# FROM employees METADATA _id
+query5 = ESQL.from_("employees").metadata("_id")
+```
+
+Note how in the last example the optional `METADATA` clause of the `FROM` command is added as a chained method.
+
+### `ESQL.row`
+
+The `ROW` command produces a row with one or more columns, with the values that you specify.
+
+Examples:
+
+```python
+from elasticsearch.esql import ESQL, functions
+
+# ROW a = 1, b = "two", c = null
+query1 = ESQL.row(a=1, b="two", c=None)
+
+# ROW a = [1, 2]
+query2 = ESQL.row(a=[1, 2])
+
+# ROW a = ROUND(1.23, 0)
+query3 = ESQL.row(a=functions.round(1.23, 0))
+```
+
+### `ESQL.show`
+
+The `SHOW` command returns information about the deployment and its capabilities.
+
+Example:
+
+```python
+from elasticsearch.esql import ESQL
+
+# SHOW INFO
+query = ESQL.show("INFO")
+```
+
+## Adding processing commands
+
+Once you have a query object, you can add one or more processing commands to it. The following
+example shows how to create a query that uses the `WHERE` and `LIMIT` commands to filter the
+results:
+
+```python
+from elasticsearch.esql import ESQL
+
+# FROM employees
+# | WHERE still_hired == true
+# | LIMIT 10
+query = ESQL.from_("employees").where("still_hired == true").limit(10)
+```
+
+For a complete list of available commands, review the methods of the [`ESQLBase` class](https://elasticsearch-py.readthedocs.io/en/stable/esql.html) in the Elasticsearch Python API documentation.
+
+## Creating ES|QL Expressions and Conditions
+
+The ES|QL query builder for Python provides two ways to create expressions and conditions in ES|QL queries.
+
+The simplest option is to provide all ES|QL expressions and conditionals as strings. The following example uses this approach to add two calculated columns to the results using the `EVAL` command:
+
+```python
+from elasticsearch.esql import ESQL
+
+# FROM employees
+# | SORT emp_no
+# | KEEP first_name, last_name, height
+# | EVAL height_feet = height * 3.281, height_cm = height * 100
+query = (
+    ESQL.from_("employees")
+    .sort("emp_no")
+    .keep("first_name", "last_name", "height")
+    .eval(height_feet="height * 3.281", height_cm="height * 100")
+)
+```
+
+A more advanced alternative is to replace the strings with Python expressions, which are automatically translated to ES|QL when the query object is rendered to a string. The following example is functionally equivalent to the one above:
+
+```python
+from elasticsearch.esql import ESQL, E
+
+# FROM employees
+# | SORT emp_no
+# | KEEP first_name, last_name, height
+# | EVAL height_feet = height * 3.281, height_cm = height * 100
+query = (
+    ESQL.from_("employees")
+    .sort("emp_no")
+    .keep("first_name", "last_name", "height")
+    .eval(height_feet=E("height") * 3.281, height_cm=E("height") * 100)
+)
+```
+
+Here the `E()` helper function wraps the column name, turning it into an ES|QL expression that can be combined and modified with standard Python operators.
+
+Here is a second example, which uses a conditional expression in the `WHERE` command:
+
+```python
+from elasticsearch.esql import ESQL
+
+# FROM employees
+# | KEEP first_name, last_name, height
+# | WHERE first_name == "Larry"
+query = (
+    ESQL.from_("employees")
+    .keep("first_name", "last_name", "height")
+    .where('first_name == "Larry"')
+)
+```
+
+Using Python syntax, the condition can be rewritten as follows:
+
+```python
+from elasticsearch.esql import ESQL, E
+
+# FROM employees
+# | KEEP first_name, last_name, height
+# | WHERE first_name == "Larry"
+query = (
+    ESQL.from_("employees")
+    .keep("first_name", "last_name", "height")
+    .where(E("first_name") == "Larry")
+)
+```
+
+### Preventing injection attacks
+
+ES|QL, like most query languages, is vulnerable to [code injection attacks](https://en.wikipedia.org/wiki/Code_injection) if untrusted data provided by users is added to a query. To eliminate this risk, ES|QL allows untrusted data to be given separately from the query as parameters.
+
+Continuing with the example above, let's assume that the application needs a `find_employee_by_name()` function that searches for the name given as an argument. If this argument is received by the application from users, then it is considered untrusted and should not be added to the query directly. Here is how to code the function in a secure manner:
+
+```python
+def find_employee_by_name(name):
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "height")
+        .where(E("first_name") == E("?"))
+    )
+    return client.esql.query(query=query, params=[name])
+```
+
+Here the part of the query in which the untrusted data needs to be inserted is replaced with a parameter, which in ES|QL is defined by the question mark. When using Python expressions, the parameter must be given as `E("?")` so that it is treated as an expression and not as a literal string.
+
+The values given in the `params` argument of the query endpoint are assigned, in order, to the parameters defined in the query.
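+
+For instance, a query with two placeholders receives its values in the same order (a sketch; the second condition is purely illustrative):
+
+```python
+from elasticsearch.esql import ESQL, E
+
+query = (
+    ESQL.from_("employees")
+    .keep("first_name", "last_name", "languages")
+    .where(E("first_name") == E("?"))
+    .where(E("languages") >= E("?"))
+)
+# the first "?" receives "Georgi", the second receives 2
+response = client.esql.query(query=query, params=["Georgi", 2])
+```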
+
+## Using ES|QL functions
+
+The ES|QL language includes a rich set of functions that can be used in expressions and conditionals. These can be included in expressions given as strings, as shown in the example below:
+
+```python
+from elasticsearch.esql import ESQL
+
+# FROM employees
+# | KEEP first_name, last_name, height
+# | WHERE LENGTH(first_name) < 4
+query = (
+    ESQL.from_("employees")
+    .keep("first_name", "last_name", "height")
+    .where("LENGTH(first_name) < 4")
+)
+```
+
+All available ES|QL functions have Python wrappers in the `elasticsearch.esql.functions` module, which can be used when building expressions using Python syntax. Below is the example above coded using Python syntax:
+
+```python
+from elasticsearch.esql import ESQL, functions
+
+# FROM employees
+# | KEEP first_name, last_name, height
+# | WHERE LENGTH(first_name) < 4
+query = (
+    ESQL.from_("employees")
+    .keep("first_name", "last_name", "height")
+    .where(functions.length(E("first_name")) < 4)
+)
+```
+
+Note that arguments passed to functions are assumed to be literals. When passing field names, parameters or other ES|QL expressions, it is necessary to wrap them with the `E()` helper function so that they are interpreted correctly.
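+
+For example, rendering the two variants side by side shows the difference (a sketch; the rendered ES|QL is indicative):
+
+```python
+from elasticsearch.esql import ESQL, E, functions
+
+# string argument treated as a literal: ... | WHERE LENGTH("Larry") < 4
+print(ESQL.from_("employees").where(functions.length("Larry") < 4))
+
+# wrapped argument treated as a column reference: ... | WHERE LENGTH(first_name) < 4
+print(ESQL.from_("employees").where(functions.length(E("first_name")) < 4))
+```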
+
+You can find the complete list of available functions in the Python client's [ES|QL API reference documentation](https://elasticsearch-py.readthedocs.io/en/stable/esql.html#module-elasticsearch.esql.functions).
diff -pruN 8.17.2-2/docs/reference/examples.md 9.2.0-1/docs/reference/examples.md
--- 8.17.2-2/docs/reference/examples.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/examples.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,200 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/examples.html
+navigation_title: Examples
+---
+
+# {{es}} Python client examples [examples]
+
+Below you can find examples of how to use the most frequently called APIs with the Python client.
+
+* [Indexing a document](#ex-index)
+* [Getting a document](#ex-get)
+* [Refreshing an index](#ex-refresh)
+* [Searching for a document](#ex-search)
+* [Updating a document](#ex-update)
+* [Deleting a document](#ex-delete)
+
+
+## Indexing a document [ex-index]
+
+To index a document, you need to specify three pieces of information: `index`, `id`, and a `document`:
+
+```py
+from datetime import datetime
+from elasticsearch import Elasticsearch
+client = Elasticsearch('https://localhost:9200')
+
+doc = {
+    'author': 'author_name',
+    'text': 'Interesting content...',
+    'timestamp': datetime.now(),
+}
+resp = client.index(index="test-index", id=1, document=doc)
+print(resp['result'])
+```
+
+
+## Getting a document [ex-get]
+
+To get a document, you need to specify its `index` and `id`:
+
+```py
+resp = client.get(index="test-index", id=1)
+print(resp['_source'])
+```
+
+
+## Refreshing an index [ex-refresh]
+
+You can perform the refresh operation on an index:
+
+```py
+client.indices.refresh(index="test-index")
+```
+
+
+## Searching for a document [ex-search]
+
+The `search()` method returns the results that match a query:
+
+```py
+resp = client.search(index="test-index", query={"match_all": {}})
+print("Got %d Hits:" % resp['hits']['total']['value'])
+for hit in resp['hits']['hits']:
+    print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
+```
+
+
+## Updating a document [ex-update]
+
+To update a document, you need to specify three pieces of information: `index`, `id`, and a `doc`:
+
+```py
+from datetime import datetime
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch('https://localhost:9200')
+
+doc = {
+    'author': 'author_name',
+    'text': 'Interesting modified content...',
+    'timestamp': datetime.now(),
+}
+resp = client.update(index="test-index", id=1, doc=doc)
+print(resp['result'])
+```
+
+
+## Deleting a document [ex-delete]
+
+You can delete a document by specifying its `index` and `id` in the `delete()` method:
+
+```py
+client.delete(index="test-index", id=1)
+```
+
+
+## Interactive examples [ex-interactive]
+
+The [elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) repo contains interactive and executable [Python notebooks](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks), sample apps, and resources for testing out Elasticsearch using the Python client. These examples are mainly focused on vector search, hybrid search, and generative AI use cases, but you’ll also find examples of basic operations like creating index mappings and performing lexical search.
+
+
+### Search notebooks [_search_notebooks]
+
+The [Search](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/search) folder is a good place to start if you’re new to Elasticsearch. This folder contains a number of notebooks that demonstrate the fundamentals of Elasticsearch, like indexing vectors, running lexical, semantic and *hybrid* searches, and more.
+
+The following notebooks are available:
+
+* [Quick start](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb)
+* [Keyword, querying, filtering](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb)
+* [Hybrid search](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb)
+* [Semantic search with ELSER](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb)
+* [Multilingual semantic search](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb)
+* [Query rules](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb)
+* [Synonyms API quick start](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb)
+
+Here’s a brief overview of what you’ll learn in each notebook.
+
+
+#### Quick start [_quick_start]
+
+In the [00-quick-start.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb) notebook you’ll learn how to:
+
+* Use the Elasticsearch Python client for various operations.
+* Create and define an index for a sample dataset with `dense_vector` fields.
+* Transform book titles into embeddings using [Sentence Transformers](https://www.sbert.net) and index them into Elasticsearch.
+* Perform k-nearest neighbors (knn) semantic searches.
+* Integrate traditional text-based search with semantic search, for a hybrid search system.
+* Use reciprocal rank fusion (RRF) to intelligently combine search results from different retrieval systems.
+
+
+#### Keyword, querying, filtering [_keyword_querying_filtering]
+
+In the [01-keyword-querying-filtering.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb) notebook, you’ll learn how to:
+
+* Use [query and filter contexts](docs-content://explore-analyze/query-filter/languages/querydsl.md) to search and filter documents in Elasticsearch.
+* Execute full-text searches with `match` and `multi-match` queries.
+* Query and filter documents based on `text`, `number`, `date`, or `boolean` values.
+* Run multi-field searches using the `multi-match` query.
+* Prioritize specific fields in the `multi-match` query for tailored results.
+
+
+#### Hybrid search [_hybrid_search]
+
+In the [02-hybrid-search.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb) notebook, you’ll learn how to:
+
+* Combine results of traditional text-based search with semantic search, for a hybrid search system.
+* Transform fields in the sample dataset into embeddings using the Sentence Transformer model and index them into Elasticsearch.
+* Use the [RRF API](elasticsearch://reference/elasticsearch/rest-apis/reciprocal-rank-fusion.md#rrf-api) to combine the results of a `match` query and a `kNN` semantic search.
+* Walk through a super simple toy example that demonstrates, step by step, how RRF ranking works.
+
+
+#### Semantic search with ELSER [_semantic_search_with_elser]
+
+In the [03-ELSER.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb) notebook, you’ll learn how to:
+
+* Use the Elastic Learned Sparse Encoder (ELSER) for text expansion-powered semantic search, out of the box — without training, fine-tuning, or embeddings generation.
+* Download and deploy the ELSER model in your Elastic environment.
+* Create an Elasticsearch index named `search-movies` with specific mappings and index a dataset of movie descriptions.
+* Create an ingest pipeline containing an inference processor for ELSER model execution.
+* Reindex the data from `search-movies` into another index, `elser-movies`, using the ELSER pipeline for text expansion.
+* Observe the results of running the documents through the model by inspecting the additional terms it adds to documents, which enhance searchability.
+* Perform simple keyword searches on the `elser-movies` index to assess the impact of ELSER’s text expansion.
+* Execute ELSER-powered semantic searches using the `text_expansion` query.
+
+
+#### Multilingual semantic search [_multilingual_semantic_search]
+
+In the [04-multilingual.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb) notebook, you’ll learn how to:
+
+* Use a multilingual embedding model for semantic search across languages.
+* Transform fields in the sample dataset into embeddings using the Sentence Transformer model and index them into Elasticsearch.
+* Use filtering with a `kNN` semantic search.
+* Walk through a super simple toy example that demonstrates, step by step, how multilingual search works across languages, and within non-English languages.
+
+
+#### Query rules [_query_rules]
+
+In the [05-query-rules.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb) notebook, you’ll learn how to:
+
+* Use the query rules management APIs to create and edit promotional rules based on contextual queries.
+* Apply these query rules by using the `rule_query` in Query DSL.
+
+
+#### Synonyms API quick start [_synonyms_api_quick_start]
+
+In the [06-synonyms-api.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb) notebook, you’ll learn how to:
+
+* Use the synonyms management API to create a synonyms set to enhance your search recall.
+* Configure an index to use search-time synonyms.
+* Update synonyms in real time.
+* Run queries that are enhanced by synonyms.
+
+
+### Other notebooks [_other_notebooks]
+
+* [Generative AI](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/generative-ai). Notebooks that demonstrate various use cases for Elasticsearch as the retrieval engine and vector store for LLM-powered applications.
+* [Integrations](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations). Notebooks that demonstrate how to integrate popular services and projects with Elasticsearch, including OpenAI, Hugging Face, and LlamaIndex.
+* [Langchain](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/langchain). Notebooks that demonstrate how to integrate Elastic with LangChain, a framework for developing applications powered by language models.
+
diff -pruN 8.17.2-2/docs/reference/getting-started.md 9.2.0-1/docs/reference/getting-started.md
--- 8.17.2-2/docs/reference/getting-started.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/getting-started.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,176 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html
+  - https://www.elastic.co/guide/en/serverless/current/elasticsearch-python-client-getting-started.html
+---
+
+# Getting started [getting-started-python]
+
+This page guides you through installing the Python client, instantiating it, and performing basic Elasticsearch operations with it.
+
+
+### Requirements [_requirements]
+
+* [Python](https://www.python.org/) 3.10 or newer
+* [`pip`](https://pip.pypa.io/en/stable/), installed by default alongside Python
+
+
+### Installation [_installation]
+
+To install the latest version of the client, run the following command:
+
+```shell
+python -m pip install elasticsearch
+```
+
+Refer to the [*Installation*](/reference/installation.md) page to learn more.
+
+
+### Connecting [_connecting]
+
+You can connect to Elastic Cloud using an API key and the Elasticsearch endpoint.
+
+```py
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch(
+    "https://...",  # Elasticsearch endpoint
+    api_key="api_key",
+)
+```
+
+Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment:
+
+![Finding Elasticsearch endpoint](images/es-endpoint.jpg)
+
+You can generate an API key on the **Management** page under Security.
+
+![Create API key](images/create-api-key.png)
+
+For other connection options, refer to the [*Connecting*](/reference/connecting.md) section.
+
+
+### Operations [_operations]
+
+Time to use Elasticsearch! This section walks you through the basic, and most important, operations of Elasticsearch. For more operations and more advanced examples, refer to the [*Examples*](/reference/examples.md) page.
+
+
+#### Creating an index [_creating_an_index]
+
+This is how you create the `my_index` index:
+
+```py
+client.indices.create(index="my_index")
+```
+
+Optionally, you can first define the expected types of your fields with a custom mapping.
+
+```py
+mappings = {
+    "properties": {
+        "foo": {"type": "text"},
+        "bar": {
+            "type": "text",
+            "fields": {
+                "keyword": {
+                    "type": "keyword",
+                    "ignore_above": 256,
+                }
+            },
+        },
+    }
+}
+
+client.indices.create(index="my_index", mappings=mappings)
+```
+
+
+#### Indexing documents [_indexing_documents]
+
+This indexes a document with the index API:
+
+```py
+client.index(
+    index="my_index",
+    id="my_document_id",
+    document={
+        "foo": "foo",
+        "bar": "bar",
+    }
+)
+```
+
+You can also index multiple documents at once with the bulk helper function:
+
+```py
+from elasticsearch import helpers
+
+def generate_docs():
+    for i in range(10):
+        yield {
+            "_index": "my_index",
+            "foo": f"foo {i}",
+            "bar": "bar",
+        }
+
+helpers.bulk(client, generate_docs())
+```
+
+These helpers are the recommended way to perform bulk ingestion. While it is also possible to perform bulk ingestion using `client.bulk` directly, the helpers handle retries, chunked ingestion, and more. See the [*Client helpers*](/reference/client-helpers.md) page for more details.
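+
+As a hedged sketch, `streaming_bulk()` can also be used to inspect the outcome of each document; with `raise_on_error=False`, failures are yielded as results instead of raising an exception:
+
+```py
+from elasticsearch import helpers
+
+for ok, result in helpers.streaming_bulk(
+    client, generate_docs(), chunk_size=500, raise_on_error=False
+):
+    if not ok:
+        # `result` contains the item-level error returned by Elasticsearch.
+        print("Failed to index document:", result)
+```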
+
+
+#### Getting documents [_getting_documents]
+
+You can get documents by using the following code:
+
+```py
+client.get(index="my_index", id="my_document_id")
+```
+
+
+#### Searching documents [_searching_documents]
+
+This is how you can create a single match query with the Python client:
+
+```py
+client.search(index="my_index", query={
+    "match": {
+        "foo": "foo"
+    }
+})
+```
+
+
+#### Updating documents [_updating_documents]
+
+This is how you can update a document, for example to add a new field:
+
+```py
+client.update(
+    index="my_index",
+    id="my_document_id",
+    doc={
+        "foo": "bar",
+        "new_field": "new value",
+    }
+)
+```
+
+
+#### Deleting documents [_deleting_documents]
+
+```py
+client.delete(index="my_index", id="my_document_id")
+```
+
+
+#### Deleting an index [_deleting_an_index]
+
+```py
+client.indices.delete(index="my_index")
+```
+
+
+## Further reading [_further_reading]
+
+* Use [*Client helpers*](/reference/client-helpers.md) for a more comfortable experience with the APIs.
Binary files 8.17.2-2/docs/reference/images/create-api-key.png and 9.2.0-1/docs/reference/images/create-api-key.png differ
Binary files 8.17.2-2/docs/reference/images/es-endpoint.jpg and 9.2.0-1/docs/reference/images/es-endpoint.jpg differ
Binary files 8.17.2-2/docs/reference/images/otel-waterfall-retry.png and 9.2.0-1/docs/reference/images/otel-waterfall-retry.png differ
Binary files 8.17.2-2/docs/reference/images/otel-waterfall-with-http.png and 9.2.0-1/docs/reference/images/otel-waterfall-with-http.png differ
Binary files 8.17.2-2/docs/reference/images/otel-waterfall-without-http.png and 9.2.0-1/docs/reference/images/otel-waterfall-without-http.png differ
Binary files 8.17.2-2/docs/reference/images/python-example.png and 9.2.0-1/docs/reference/images/python-example.png differ
diff -pruN 8.17.2-2/docs/reference/index.md 9.2.0-1/docs/reference/index.md
--- 8.17.2-2/docs/reference/index.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/index.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,82 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/index.html
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/overview.html
+---
+
+# Python [overview]
+
+This is the official Python client for {{es}}. Its goal is to provide common ground for all {{es}}-related code in Python. For this reason, the client is designed to be unopinionated and extendable. API reference documentation for this client is available on [Read the Docs](https://elasticsearch-py.readthedocs.io).
+
+
+## Example use [_example_use]
+
+Simple use-case:
+
+```python
+>>> from datetime import datetime
+>>> from elasticsearch import Elasticsearch
+
+# Connect to 'http://localhost:9200'
+>>> client = Elasticsearch("http://localhost:9200")
+
+# Datetimes will be serialized:
+>>> client.index(index="my-index-000001", id=42, document={"any": "data", "timestamp": datetime.now()})
+{'_index': 'my-index-000001', '_id': '42', '_version': 1, 'result': 'created', '_shards': {'total': 2, 'successful': 1, 'failed': 0}, '_seq_no': 0, '_primary_term': 1}
+
+# ...but not deserialized
+>>> client.get(index="my-index-000001", id=42)['_source']
+{'any': 'data', 'timestamp': '2013-05-12T19:45:31.804229'}
+```
+
+::::{tip}
+For an elaborate example of how to ingest data into Elastic Cloud, refer to [this page](docs-content://manage-data/ingest/ingesting-data-from-applications/ingest-data-with-python-on-elasticsearch-service.md).
+::::
+
+
+## Features [_features]
+
+The client’s features include:
+
+* Translating basic Python data types to and from JSON
+* Configurable automatic discovery of cluster nodes
+* Persistent connections
+* Load balancing (with pluggable selection strategy) across all available nodes
+* Node timeouts on transient errors
+* Thread safety
+* Pluggable architecture
+
+The client also contains a convenient set of [helpers](client-helpers.md) for some of the more engaging tasks like bulk indexing and reindexing.
+
+
+## Elasticsearch Python DSL [_elasticsearch_python_dsl]
+
+For higher-level access with a more limited scope, have a look at the DSL module, which provides a more convenient and idiomatic way to write and manipulate queries.
+
+
+## Compatibility [_compatibility]
+
+Language clients are _forward compatible:_ each client version works with equivalent and later minor versions of {{es}} without breaking. 
+
+Compatibility does not imply full feature parity. New {{es}} features are supported only in equivalent client versions. For example, an 8.12 client fully supports {{es}} 8.12 features and works with 8.13 without breaking; however, it does not support new {{es}} 8.13 features. An 8.13 client fully supports {{es}} 8.13 features.
+
+| Elasticsearch version | elasticsearch-py branch |
+| --- | --- |
+| main | main |
+| 9.x | 9.x |
+| 9.x | 8.x |
+| 8.x | 8.x |
+
+{{es}} language clients are also _backward compatible_ across minor versions &mdash; with default distributions and without guarantees. 
+
+### Major version upgrades
+
+:::{tip}
+To upgrade to a new major version, first upgrade {{es}}, then upgrade the Python {{es}} client.
+:::
+
+Since version 8.0, the {{es}} server supports a compatibility mode that allows smoother upgrade experiences. In a nutshell, this makes it possible to upgrade the {{es}} server to the next major version, while continuing to use the same client. This gives more room to coordinate the upgrade of your codebase to the next major version.
+
+For example, to upgrade a system that uses {{es}} 8.x you can upgrade the {{es}} server to 9.x first, and the 8.x Python {{es}} client will continue to work (aside from any breaking changes, which should be listed in the server release notes). You can continue using the 8.x client during the server migration, and only upgrade it once the server migration is complete. The process is described in detail in the [REST API compatibility workflow](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/compatibility#_rest_api_compatibility_workflow) section of the {{es}} documentation.
+
+If you need to work with multiple client versions, note that older versions are also released with the `elasticsearch8` and `elasticsearch9` package names so that they can be installed together.
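+
+For illustration, here is a hedged sketch of using the two versioned packages side by side; the connection URLs and API keys are placeholders:
+
+```python
+# Both packages can be installed together, for example with:
+#   python -m pip install elasticsearch8 elasticsearch9
+from elasticsearch8 import Elasticsearch as Elasticsearch8
+from elasticsearch9 import Elasticsearch as Elasticsearch9
+
+legacy_client = Elasticsearch8("https://legacy-cluster:9200", api_key="...")
+current_client = Elasticsearch9("https://new-cluster:9200", api_key="...")
+```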
diff -pruN 8.17.2-2/docs/reference/installation.md 9.2.0-1/docs/reference/installation.md
--- 8.17.2-2/docs/reference/installation.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/installation.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,22 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/installation.html
+---
+
+# Installation [installation]
+
+[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch) or [sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page) for a free trial of Elastic Cloud.
+
+The Python client for {{es}} can be installed with pip:
+
+```sh
+$ python -m pip install elasticsearch
+```
+
+If your application uses async/await in Python you can install with the `async` extra:
+
+```sh
+$ python -m pip install elasticsearch[async]
+```
+
+Read more about [how to use asyncio with this project](https://elasticsearch-py.readthedocs.io/en/master/async.html).
diff -pruN 8.17.2-2/docs/reference/integrations.md 9.2.0-1/docs/reference/integrations.md
--- 8.17.2-2/docs/reference/integrations.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/integrations.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,62 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/integrations.html
+---
+
+# Integrations [integrations]
+
+You can find integration options and information on this page.
+
+
+## OpenTelemetry instrumentation [opentelemetry-intro]
+
+The Python Elasticsearch client supports native OpenTelemetry instrumentation following the [OpenTelemetry Semantic Conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/). Refer to the [Using OpenTelemetry](/reference/opentelemetry.md) page for details.
+
+
+## ES|QL [esql-intro]
+
+[ES|QL](docs-content://explore-analyze/query-filter/languages/esql.md) is available through the Python Elasticsearch client. Refer to the [ES|QL and Pandas](/reference/esql-pandas.md) page to learn more about using ES|QL and Pandas together with dataframes.
+
+
+## Transport [transport]
+
+Connections, retries, and pooling are handled by the [Elastic Transport Python](https://github.com/elastic/elastic-transport-python) library. Documentation on the low-level classes is available on [Read the Docs](https://elastic-transport-python.readthedocs.io).
+
+
+## Tracking requests with Opaque ID [opaque-id]
+
+You can enrich your requests against Elasticsearch with an identifier string that allows you to discover this identifier in [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), to support you with [identifying search slow log origin](elasticsearch://reference/elasticsearch/index-settings/slow-log.md), or to help with [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).
+
+The opaque ID can be set with the `opaque_id` parameter of the client's `.options()` method:
+
+```python
+client = Elasticsearch(...)
+client.options(opaque_id="request-id-...").search(...)
+```
+
+
+## Type Hints [type-hints]
+
+Starting in `elasticsearch-py` v7.10.0, the library ships with [type hints](https://www.python.org/dev/peps/pep-0484) and supports basic static type analysis with tools like [Mypy](http://mypy-lang.org) and [Pyright](https://github.com/microsoft/pyright).
+
+If we write a script that has a type error, such as passing a `str` instead of a `float` to `request_timeout`, and then run Mypy on the script:
+
+```python
+# script.py
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch(...)
+client.options(
+    request_timeout="5"  # type error!
+).search(...)
+
+# $ mypy script.py
+# script.py:5: error: Argument "request_timeout" to "search" of "Elasticsearch" has
+#                     incompatible type "str"; expected "Union[int, float, None]"
+# Found 1 error in 1 file (checked 1 source file)
+```
+
+Type hints also allow tools like your IDE to check types and provide better auto-complete functionality.
+
+
+
diff -pruN 8.17.2-2/docs/reference/opentelemetry.md 9.2.0-1/docs/reference/opentelemetry.md
--- 8.17.2-2/docs/reference/opentelemetry.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/opentelemetry.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,74 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/opentelemetry.html
+---
+
+# Using OpenTelemetry [opentelemetry]
+
+You can use [OpenTelemetry](https://opentelemetry.io/) to monitor the performance and behavior of your {{es}} requests through the Elasticsearch Python client. The Python client comes with built-in OpenTelemetry instrumentation that emits [distributed tracing spans](docs-content://solutions/observability/apm/traces-ui.md) by default. With that, applications using [manual OpenTelemetry instrumentation](https://www.elastic.co/blog/manual-instrumentation-of-python-applications-opentelemetry) or [automatic OpenTelemetry instrumentation](https://www.elastic.co/blog/auto-instrumentation-of-python-applications-opentelemetry) are enriched with additional spans that contain insightful information about the execution of the {{es}} requests.
+
+The native instrumentation in the Python client follows the [OpenTelemetry Semantic Conventions for {{es}}](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/). In particular, the instrumentation in the client covers the logical layer of {{es}} requests. A single span per request is created that is processed by the service through the Python client. The following image shows a trace that records the handling of two different {{es}} requests: an `info` request and a `search` request.
+
+% TO DO: Use `:class: screenshot`
+![Distributed trace with Elasticsearch spans](images/otel-waterfall-without-http.png)
+
+Usually, OpenTelemetry auto-instrumentation modules come with instrumentation support for HTTP-level communication. In this case, in addition to the logical {{es}} client requests, spans will be captured for the physical HTTP requests emitted by the client. The following image shows a trace with both {{es}} spans (in blue) and the corresponding HTTP-level spans (in red), captured after installing the `opentelemetry-instrumentation-urllib3` package:
+
+% TO DO: Use `:class: screenshot`
+![Distributed trace with Elasticsearch spans](images/otel-waterfall-with-http.png)
+
+Advanced Python client behavior, such as node round-robin and request retries, is revealed through the combination of logical {{es}} spans and the physical HTTP spans. The following example shows a `search` request in a scenario with two nodes:
+
+% TO DO: Use `:class: screenshot`
+![Distributed trace with Elasticsearch spans](images/otel-waterfall-retry.png)
+
+The first node is unavailable and results in an HTTP error, while the retry to the second node succeeds. Both HTTP requests are subsumed by the logical {{es}} request span (in blue).
+
+
+### Set up the OpenTelemetry instrumentation [_setup_the_opentelemetry_instrumentation]
+
+When using the [manual Python OpenTelemetry instrumentation](https://opentelemetry.io/docs/languages/python/instrumentation/) or the [OpenTelemetry Python agent](https://opentelemetry.io/docs/languages/python/automatic/), the Python client’s OpenTelemetry instrumentation is enabled by default and uses the global OpenTelemetry SDK with the global tracer provider. If you’re getting started with OpenTelemetry instrumentation, the following blog posts have step-by-step instructions to ingest and explore tracing data with the Elastic stack:
+
+* [Manual instrumentation with OpenTelemetry for Python applications](https://www.elastic.co/blog/manual-instrumentation-of-python-applications-opentelemetry)
+* [Automatic instrumentation with OpenTelemetry for Python applications](https://www.elastic.co/blog/auto-instrumentation-of-python-applications-opentelemetry)
+
+
+## Comparison with community instrumentation [_comparison_with_community_instrumentation]
+
+The [community OpenTelemetry Elasticsearch instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/elasticsearch/elasticsearch.html) also instruments the client and sends OpenTelemetry traces, but it was developed before the OpenTelemetry Semantic Conventions for {{es}}, so its trace attributes are inconsistent with other OpenTelemetry Elasticsearch client instrumentations. To avoid tracing the same requests twice, make sure to use only one instrumentation, either by uninstalling the `opentelemetry-instrumentation-elasticsearch` Python package or by [disabling the native instrumentation](#opentelemetry-config-enable).
+
+
+### Configuring the OpenTelemetry instrumentation [_configuring_the_opentelemetry_instrumentation]
+
+You can configure this OpenTelemetry instrumentation through environment variables. The following configuration options are available.
+
+
+#### Enable / Disable the OpenTelemetry instrumentation [opentelemetry-config-enable]
+
+With this configuration option you can enable (default) or disable the built-in OpenTelemetry instrumentation.
+
+**Default:** `true`
+
+|     |     |
+| --- | --- |
+| Environment Variable | `OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED` |
+
+
+#### Capture search request bodies [_capture_search_request_bodies]
+
+By default, the built-in OpenTelemetry instrumentation does not capture request bodies due to data privacy considerations. You can use this option to capture search queries from the request bodies of {{es}} search requests if you wish to gather this information regardless. The options are to capture the raw search query or not to capture it at all.
+
+**Default:** `omit`
+
+**Valid Options:** `omit`, `raw`
+
+|     |     |
+| --- | --- |
+| Environment Variable | `OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY` |
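+
+As a hedged illustration, the variables can be set in the environment of the application, assuming they are in place before the client starts emitting spans; the values below are only examples:
+
+```python
+import os
+
+# Keep the built-in instrumentation enabled (the default) and opt in to
+# capturing raw search queries.
+os.environ["OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED"] = "true"
+os.environ["OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY"] = "raw"
+```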
+
+
+### Overhead [_overhead]
+
+The OpenTelemetry instrumentation (like any other monitoring approach) may come with a slight overhead in CPU, memory, and/or latency. Overhead occurs only when the instrumentation is enabled (the default) and an OpenTelemetry SDK is active in the target application. When the instrumentation is disabled, or no OpenTelemetry SDK is active within the target application, no monitoring overhead is expected when using the client.
+
+Even in cases where the instrumentation is enabled and is actively used (by an OpenTelemetry SDK), the overhead is minimal and negligible in the vast majority of cases. In edge cases where there is a noticeable overhead, the [instrumentation can be explicitly disabled](#opentelemetry-config-enable) to eliminate any potential impact on performance.
diff -pruN 8.17.2-2/docs/reference/querying.md 9.2.0-1/docs/reference/querying.md
--- 8.17.2-2/docs/reference/querying.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/querying.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,104 @@
+# Querying
+
+The Python Elasticsearch client provides several ways to send queries to Elasticsearch. This document explains the details of how to construct and execute queries using the client. This document does not cover the DSL module.
+
+## From API URLs to function calls
+
+Elasticsearch APIs are grouped by namespaces.
+
+ * There's the global namespace, with APIs like the Search API (`GET _search`) or the Index API (`PUT /<target>/_doc/<_id>` and related endpoints).
+ * Then there are all the other namespaces, such as:
+   * Indices with APIs like the Create index API (`PUT /my-index`),
+   * ES|QL with the Run an ES|QL query API (`POST /_query`),
+   * and so on.
+
+As a result, when you know which namespace and function you need, you can call the function. Assuming that `client` is an Elasticsearch instance, here is how you would call the examples from above:
+
+* Global namespace: `client.search(...)` and `client.index(...)`
+* Other namespaces:
+  * Indices: `client.indices.create(...)`
+  * ES|QL: `client.esql.query(...)`
+
+How can you figure out the namespace?
+
+* The [Elasticsearch API docs](https://www.elastic.co/docs/api/doc/elasticsearch/) can help, even though the tags it uses do not fully map to namespaces.
+* You can also use the client documentation, by:
+  * browsing the [Elasticsearch API Reference](https://elasticsearch-py.readthedocs.io/en/stable/api.html) page, or
+  * searching for your endpoint using [Read the Docs](https://elasticsearch-py.readthedocs.io/) search, which is powered by Elasticsearch!
+* Finally, for Elasticsearch 8.x, most examples in the [Elasticsearch guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) are also available in Python. (This is still a work in progress for Elasticsearch 9.x.) In the example below, `client.ingest.put_pipeline(...)` is the function that calls the "Create or update a pipeline" API.
+
+
+![Python code example in the Elasticsearch guide](images/python-example.png)
+
+## Parameters
+
+Now that you know which functions to call, the next step is parameters. To avoid ambiguity, the Python Elasticsearch client mandates keyword arguments. To give an example, let's look at the ["Create an index" API](https://elasticsearch-py.readthedocs.io/en/stable/api/indices.html#elasticsearch.client.IndicesClient.create). There's only one required parameter, `index`, so the minimal form looks like this:
+
+```python
+from elasticsearch import Elasticsearch
+
+client = Elasticsearch("http://localhost:9200", api_key="...")
+
+client.indices.create(index="my-index")
+```
+
+You can also use other parameters, including the first level of body parameters, such as:
+
+```python
+resp = client.indices.create(
+    index="logs",
+    aliases={"logs-alias": {}},
+    mappings={"name": {"type": "text"}},
+)
+print(resp)
+```
+
+In this case, the client sends the following JSON body to Elasticsearch:
+
+```console
+PUT /logs
+{
+    "aliases": {"logs-alias": {}},
+    "mappings": {"name": {"type": "text"}}
+}
+```
+
+## Unknown parameters or APIs
+
+Like other clients, the Python Elasticsearch client is generated from the [Elasticsearch specification](https://github.com/elastic/elasticsearch-specification). While we strive to keep it up to date, it is not (yet!) perfect, and sometimes body parameters are missing. In this case, you can specify the body directly, as follows:
+
+```python
+resp = client.indices.create(
+    index="logs",
+    body={
+        "aliases": {"logs-alias": {}},
+        "mappings": {"name": {"type": "text"}},
+        "missing_parameter": "foo",
+    }
+)
+print(resp)
+```
+
+If an API is missing entirely, you need to use the low-level `perform_request` function:
+
+```python
+resp = client.perform_request(
+    "PUT",
+    "/logs"
+    index="logs",
+    headers={"content-type": "application/json", "accept": "application/json"},
+    body={
+        "aliases": {"logs-alias": {}},
+        "mappings": {"name": {"type": "text"}},
+        "missing_parameter": "foo",
+    }
+)
+print(resp)
+```
+
+One benefit of this function is that it lets you use arbitrary headers, such as the `es-security-runas-user` header used to [impersonate users](https://www.elastic.co/guide/en/elasticsearch/reference/current/run-as-privilege.html).
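+
+For example, here is a hedged sketch that impersonates another user on a request; `"other_user"` is a placeholder for the user to run as:
+
+```python
+resp = client.perform_request(
+    "GET",
+    "/_security/_authenticate",
+    headers={
+        "accept": "application/json",
+        "es-security-runas-user": "other_user",  # run the request as this user
+    },
+)
+print(resp)
+```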
+
+
+## Options
+
+You can specify options such as request timeouts or retries using the `.options()` API. See the [Configuration](./configuration.md) page for details.
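+
+For example, a hedged sketch that sets a per-request timeout and retry behavior; the option names are documented on the Configuration page, and the values here are arbitrary:
+
+```python
+resp = client.options(
+    request_timeout=10,   # seconds
+    max_retries=3,
+    retry_on_timeout=True,
+).search(index="logs", query={"match_all": {}})
+print(resp["hits"]["total"])
+```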
\ No newline at end of file
diff -pruN 8.17.2-2/docs/reference/toc.yml 9.2.0-1/docs/reference/toc.yml
--- 8.17.2-2/docs/reference/toc.yml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/reference/toc.yml	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,22 @@
+toc:
+  - file: index.md
+  - file: getting-started.md
+  - file: installation.md
+  - file: connecting.md
+  - file: configuration.md
+  - file: querying.md
+  - file: esql-query-builder.md
+  - file: async.md
+  - file: integrations.md
+    children:
+      - file: opentelemetry.md
+      - file: esql-pandas.md
+  - file: examples.md
+  - file: elasticsearch-dsl.md
+    children:
+      - file: dsl_configuration.md
+      - file: dsl_tutorials.md
+      - file: dsl_how_to_guides.md
+      - file: dsl_examples.md
+      - file: dsl_migrating.md
+  - file: client-helpers.md
diff -pruN 8.17.2-2/docs/release-notes/breaking-changes.md 9.2.0-1/docs/release-notes/breaking-changes.md
--- 8.17.2-2/docs/release-notes/breaking-changes.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/release-notes/breaking-changes.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,53 @@
+---
+navigation_title: "Breaking changes"
+---
+
+# Elasticsearch Python Client breaking changes [elasticsearch-python-client-breaking-changes]
+Breaking changes can impact your Elastic applications, potentially disrupting normal operations. Before you upgrade, carefully review the Elasticsearch Python Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md).
+
+% ## Next version [elasticsearch-python-client-nextversion-breaking-changes]
+
+% ::::{dropdown} Title of breaking change 
+% Description of the breaking change.
+% For more information, check [PR #](PR link).
+% **Impact**<br> Impact of the breaking change.
+% **Action**<br> Steps for mitigating deprecation impact.
+% ::::
+
+## 9.0.0 [elasticsearch-python-client-900-breaking-changes]
+
+::::{dropdown} Remove deprecated Elasticsearch() options
+The `timeout`, `randomize_hosts`, `host_info_callback`, `sniffer_timeout`, `sniff_on_connection_fail` and `maxsize` parameters were deprecated in elasticsearch-py 8.0 and are now removed from `Elasticsearch.__init__()`.
+For more information, check [PR #2840](https://github.com/elastic/elasticsearch-py/pull/2840).
+
+**Impact**<br> These parameters were removed in favor of more descriptive versions. Using any of these parameters will prevent instantiating the Elasticsearch client.
+
+**Action**<br> These parameters can be replaced as follows (see the sketch after this list):
+ * `timeout` is now `request_timeout`
+ * `randomize_hosts` is now `randomize_nodes_in_pool`
+ * `host_info_callback` is now `sniffed_node_callback`
+ * `sniffer_timeout` is now `min_delay_between_sniffing`
+ * `sniff_on_connection_fail` is now `sniff_on_node_failure`
+ * `maxsize` is now `connections_per_node`
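+
+For illustration, a hedged before/after sketch using two of the renamed options; the values are arbitrary:
+
+```python
+from elasticsearch import Elasticsearch
+
+# elasticsearch-py 8.x (deprecated names, now rejected):
+# client = Elasticsearch("http://localhost:9200", timeout=10, maxsize=25)
+
+# elasticsearch-py 9.x equivalents:
+client = Elasticsearch("http://localhost:9200", request_timeout=10, connections_per_node=25)
+```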
+::::
+
+::::{dropdown} Remove deprecated url_prefix and use_ssl host keys
+When instantiating a new client, `hosts` can be specified as a dictionary. The `url_prefix` and `use_ssl` keys are no longer allowed.
+For more information, check [PR #2797](https://github.com/elastic/elasticsearch-py/pull/2797).
+
+**Impact**<br>
+Using any of these parameters will prevent instantiating the Elasticsearch client.
+
+**Action**<br>
+The parameters can be replaced as follows (see the sketch after this list):
+ * `use_ssl` isn't needed, as a scheme is required since elasticsearch-py 8.0 (`http` or `https`)
+ * `url_prefix` should be replaced with `path_prefix`, which is more descriptive. This functionality allows you to deploy Elasticsearch under a specific path, such as `http://host:port/path/to/elasticsearch`, instead of the default root path (`http://host:port/`)
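+
+For illustration, a hedged sketch of the replacement; the scheme carries what `use_ssl` used to express, and the path can be given in the URL or with the `path_prefix` key:
+
+```python
+from elasticsearch import Elasticsearch
+
+# The "https" scheme replaces use_ssl, and the URL path replaces url_prefix.
+client = Elasticsearch("https://host:9200/path/to/elasticsearch")
+
+# Equivalent host dictionary using the new path_prefix key:
+client = Elasticsearch(
+    [{"host": "host", "port": 9200, "scheme": "https", "path_prefix": "/path/to/elasticsearch"}]
+)
+```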
+::::
+
+::::{dropdown} Remove APIs
+Elasticsearch 9 removed the kNN search and Unfreeze index APIs.
+
+**Action**<br>
+ * The kNN search API has been replaced by the `knn` option in the search API since Elasticsearch 8.4.
+ * The Unfreeze index API was deprecated in Elasticsearch 7.14 and has been removed in Elasticsearch 9.
+ ::::
diff -pruN 8.17.2-2/docs/release-notes/deprecations.md 9.2.0-1/docs/release-notes/deprecations.md
--- 8.17.2-2/docs/release-notes/deprecations.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/release-notes/deprecations.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,26 @@
+---
+navigation_title: "Deprecations"
+---
+
+# Elasticsearch Python Client deprecations [elasticsearch-python-client-deprecations]
+Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications.
+
+Review the deprecated functionality for Elasticsearch Python Client. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md).
+
+% ## Next version [elasticsearch-python-client-versionnext-deprecations]
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact**<br> Impact of deprecation. 
+% **Action**<br> Steps for mitigating deprecation impact.
+% ::::
+
+% ## 9.0.0 [elasticsearch-python-client-900-deprecations]
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact**<br> Impact of deprecation. 
+% **Action**<br> Steps for mitigating deprecation impact.
+% ::::
\ No newline at end of file
diff -pruN 8.17.2-2/docs/release-notes/index.md 9.2.0-1/docs/release-notes/index.md
--- 8.17.2-2/docs/release-notes/index.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/release-notes/index.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,271 @@
+---
+navigation_title: "Elasticsearch Python Client"
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html
+---
+
+# Elasticsearch Python Client release notes [elasticsearch-python-client-release-notes]
+
+Review the changes, fixes, and more in each version of Elasticsearch Python Client.
+
+To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31).
+
+% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections.
+
+% ## version.next [elasticsearch-python-client-next-release-notes]
+
+% ### Features and enhancements [elasticsearch-python-client-next-features-enhancements]
+% *
+
+% ### Fixes [elasticsearch-python-client-next-fixes]
+## 9.2.0 (2025-10-28)
+
+### Enhancements
+
+* Support Trio when using the [HTTPX](https://www.python-httpx.org/) async client ([#3089](https://github.com/elastic/elasticsearch-py/pull/3089))
+* Pydantic integration for the DSL module ([#3086](https://github.com/elastic/elasticsearch-py/pull/3086))
+* Add `flush_after_seconds` option to `streaming_bulk()` ([#3064](https://github.com/elastic/elasticsearch-py/pull/3064))
+* Add `TS`, `FUSE` and `INLINE STATS` commands to the ES|QL query builder ([#3096](https://github.com/elastic/elasticsearch-py/pull/3096))
+
+### Bug Fixes
+
+* DSL: support passing inner documents as `AttrDict` instances ([#3080](https://github.com/elastic/elasticsearch-py/pull/3080))
+* DSL: add some recently added field classes as top-level exports for the package ([#3078](https://github.com/elastic/elasticsearch-py/pull/3078))
+
+### API
+
+- Add `streams` namespace with `streams.logs_disable`, `streams.logs_enable`, `streams.status` APIs
+- Add `inference.contextualai` API
+- Add `security.get_stats` API
+- Add `bytes` and `time` parameters to various APIs in the `cat` namespace.
+- Add `include_execution_metadata` parameter to `esql.async_query` and `esql.query` APIs
+- Add `index_template` parameter to `indices.simulate_index_template` API
+- Add `input_type` parameter to `inference.text_embedding` API
+- Add `field_access_pattern` parameter to `ingest.put_pipeline` API
+- Removed unsupported `size` parameter from `reindex` API
+
+#### Serverless-specific
+
+- Add `project` namespace with `project.tags` API
+- Add `project_routing` parameter to `count`, `field_caps`, `msearch`, `msearch_template`, `open_point_in_time`, `search`, `search_mvt`, `search_template`, `async_search.submit`, `cat.count`, `eql.search`, `indices.resolve_index`, `sql.query` APIs
+
+### DSL
+
+- New `CartesianBounds`, `CartesianCentroid`, `ChangePoint` aggregations
+- Add `p_value` parameter to `SignificantTerms` aggregation
+- Add `fields` parameter to `SemanticText` field
+- Add `visit_percentage` parameter to `Knn` query
+- Add `on_disk_rescore` field to `DenseVectorIndexOptions` type
+- Add `sparse_vector` field to `SemanticTextIndexOptions` type
+
+### Other
+
+* Add 3.14 to CI builds ([#3103](https://github.com/elastic/elasticsearch-py/pull/3103))
+* Drop Python 3.9 support ([#3114](https://github.com/elastic/elasticsearch-py/pull/3114))
+
+
+## 9.1.1 (2025-09-11)
+
+### Enhancements
+
+* ES|QL query builder integration with the DSL module ([#3058](https://github.com/elastic/elasticsearch-py/pull/3058))
+* ES|QL query builder robustness fixes ([#3017](https://github.com/elastic/elasticsearch-py/pull/3017))
+* Fix ES|QL `multi_match()` signature ([#3052](https://github.com/elastic/elasticsearch-py/pull/3052))
+
+### API
+
+* Add support for ES|QL query builder objects to ES|QL Query and Async Query APIs
+* Add Transform Set Upgrade Mode API
+* Fix type of `fields` parameter of Term Vectors API to array of strings
+* Fix type of `params` parameter of SQL Query API to array
+
+### DSL
+
+* Preserve the `skip_empty` setting in `to_dict()` recursive serializations ([#3041](https://github.com/elastic/elasticsearch-py/pull/3041))
+* Add `separator_group` and `separators` attributes to `ChunkingSettings` type
+* Add `primary` attribute to `ShardFailure` type
+* Fix type of `key` attribute of `ArrayPercentilesItem` to float
+
+
+## 9.1.0 (2025-07-30)
+
+### Enhancements
+
+* ES|QL query builder (technical preview) ([#2997](https://github.com/elastic/elasticsearch-py/pull/2997))
+* Update OpenTelemetry conventions ([#2999](https://github.com/elastic/elasticsearch-py/pull/2999))
+* Add option to disable accurate reporting of file and line location in warnings (Fixes #3003) ([#3006](https://github.com/elastic/elasticsearch-py/pull/3006))
+
+### APIs
+
+* Remove `if_primary_term`, `if_seq_no` and `op_type` from Create API
+* Remove `master_timeout` from Ingest Get Ip Location Database API
+* Remove `application`, `priviledge` and `username` from the Security Get User API
+* Rename `type_query_string` to `type` in License Post Start Trial API
+* Add `require_data_stream` to Index API
+* Add `settings_filter` to Cluster Get Component Template API
+* Add `cause` to Cluster Put Component Template API
+* Add `master_timeout` to Cluster State API
+* Add `ccs_minimize_roundtrips` to EQL Search API
+* Add `keep_alive` and `keep_on_completion` to ES|QL Async Query API
+* Add `format` to ES|QL Async Query Get API
+* Add ES|QL Get Query and List Queries APIs
+* Add Indices Delete Data Stream Options API
+* Add Indices Get Data Stream Options and Put Data Stream Options APIs
+* Add Indices Get Data Stream Settings and Put Data Stream Settings APIs
+* Add `allow_no_indices`, `expand_wildcards` and `ignore_available` to Indices Recovery API
+* Add Indices Remove Block API
+* Add Amazon Sagemaker to Inference API
+* Add `input_type` to Inference API
+* Add `timeout` to all Inference Put APIs
+* Add Inference Put Custom API
+* Add Inference Put DeepSeek API
+* Add `task_settings` to Put HuggingFace API
+* Add `refresh` to Security Grant API Key API
+* Add `wait_for_completion` to the Snapshot Delete API
+* Add `state` to Snapshot Get API
+* Add `refresh` to Synonyms Put Synonym, Put Synonym Rule and Delete Synonym Rule APIs
+
+### DSL
+
+* Handle lists in `copy_to` option in DSL field declarations correctly (Fixes #2992) ([#2993](https://github.com/elastic/elasticsearch-py/pull/2993))
+* Add `index_options` to SparseVector type
+* Add SparseVectorIndexOptions type
+* Add `key` to FiltersBucket type
+
+### Other changes
+
+* Drop support for Python 3.8 ([#3001](https://github.com/elastic/elasticsearch-py/pull/3001))
+
+
+## 9.0.4 (2025-09-11)
+
+### Enhancements
+
+* ES|QL query builder integration with the DSL module ([#3058](https://github.com/elastic/elasticsearch-py/pull/3058))
+* ES|QL query builder robustness fixes ([#3017](https://github.com/elastic/elasticsearch-py/pull/3017))
+* Fix ES|QL `multi_match()` signature ([#3052](https://github.com/elastic/elasticsearch-py/pull/3052))
+
+### API
+
+* Add support for ES|QL query builder objects to ES|QL Query and Async Query APIs
+* Add Transform Set Upgrade Mode API
+* Fix type of `fields` parameter of Term Vectors API to array of strings
+* Fix type of `params` parameter of SQL Query API to array
+
+### DSL
+
+* Preserve the `skip_empty` setting in `to_dict()` recursive serializations ([#3041](https://github.com/elastic/elasticsearch-py/pull/3041))
+* Add `primary` attribute to `ShardFailure` type
+* Fix type of `key` attribute of `ArrayPercentilesItem` to float
+
+## 9.0.3 (2025-07-30)
+
+### Enhancements
+
+* ES|QL query builder (technical preview) ([#2997](https://github.com/elastic/elasticsearch-py/pull/2997))
+* Add option to disable accurate reporting of file and line location in warnings (Fixes #3003) ([#3006](https://github.com/elastic/elasticsearch-py/pull/3006))
+
+### APIs
+
+* Remove `if_primary_term`, `if_seq_no` and `op_type` from Create API
+* Remove `stored_fields` from Get Source API
+* Remove `master_timeout` from Ingest Get Ip Location Database API
+* Remove `application`, `priviledge` and `username` from the Security Get User API
+* Rename `type_query_string` to `type` in License Post Start Trial API
+* Add `require_data_stream` to Index API
+* Add `settings_filter` to Cluster Get Component Template API
+* Add `cause` to Cluster Put Component Template API
+* Add `ccs_minimize_roundtrips` to EQL Search API
+* Add `keep_alive` and `keep_on_completion` to ES|QL Async Query API
+* Add `format` to ES|QL Async Query Get API
+* Add `allow_no_indices`, `expand_wildcards` and `ignore_available` to Indices Recovery API
+* Add `timeout` to all Inference Put APIs
+* Add `refresh` to Security Get User Profile API
+* Add `wait_for_completion` to the Snapshot Delete API
+
+### DSL
+
+* Handle lists in `copy_to` field option correctly (Fixes #2992) ([#2993](https://github.com/elastic/elasticsearch-py/pull/2993))
+* Add `key` to FiltersBucket type
+
+
+## 9.0.2 (2025-06-05) [elasticsearch-python-client-902-release-notes]
+
+### DSL
+
+* Add `rescore_vector` to `DenseVectorIndexOptions`
+
+
+## 9.0.1 (2025-04-28) [elasticsearch-python-client-901-release-notes]
+
+### Features and enhancements [elasticsearch-python-client-901-features-enhancements]
+
+* Surface caused_by in ApiError ([#2932](https://github.com/elastic/elasticsearch-py/pull/2932))
+* Clarify Elasticsearch 9.x compatibility ([#2928](https://github.com/elastic/elasticsearch-py/pull/2928))
+* Reorganize Sphinx docs to only include reference pages ([#2776](https://github.com/elastic/elasticsearch-py/pull/2776))
+
+
+## 9.0.0 (2025-04-15) [elasticsearch-python-client-900-release-notes]
+
+:::{tip}
+Upgrade to Elasticsearch 9 before using elasticsearch-py 9.0.0 or later. Using elasticsearch-py 9.0.0 on an Elasticsearch 8 server will fail.
+Since language clients are forward-compatible, you should first upgrade Elasticsearch, then the Elasticsearch client. See the [compatibility documentation](/reference/index.md#_compatibility) for more details.
+:::
+
+### Breaking changes
+
+* Remove deprecated `Elasticsearch()` options ([#2840](https://github.com/elastic/elasticsearch-py/pull/2840))
+* Remove deprecated `url_prefix` and `use_ssl` options ([#2797](https://github.com/elastic/elasticsearch-py/pull/2797))
+
+See the [breaking changes page](breaking-changes.md) for more details.
+
+### Enhancements
+
+* Merge the [`elasticsearch-dsl-py`](https://github.com/elastic/elasticsearch-dsl-py/) package ([#2736](https://github.com/elastic/elasticsearch-py/pull/2736))
+* Add Python DSL documentation ([#2761](https://github.com/elastic/elasticsearch-py/pull/2761))
+* Autogenerate DSL field classes from schema ([#2780](https://github.com/elastic/elasticsearch-py/pull/2780))
+* Improve DSL documentation examples with class-based queries and type hints ([#2857](https://github.com/elastic/elasticsearch-py/pull/2857))
+* Document the use of `param()` in Python DSL methods ([#2861](https://github.com/elastic/elasticsearch-py/pull/2861))
+* Migrate documentation from AsciiDoc to Markdown format ([#2806](https://github.com/elastic/elasticsearch-py/pull/2806))
+* Document use of sub-clients ([#2798](https://github.com/elastic/elasticsearch-py/pull/2798))
+* Document how to make API calls ([#2843](https://github.com/elastic/elasticsearch-py/pull/2843))
+* Fix `simulate` sub-client documentation ([#2749](https://github.com/elastic/elasticsearch-py/pull/2749))
+
+### APIs
+
+* Remove deprecated `/_knn_search` API
+* Remove Unfreeze an index API
+* Remove `min_compatible_shard_node` from Search and Async Search Submit APIs
+* Remove local parameter from cat alias, Alias exists, and Get alias APIs
+* Remove `verbose` from Index segments API
+* Remove `include_model_definition` from Get trained model configuration info API
+* Remove `wait_for_active_shards` from experimental Get field usage stats API
+* Support soft-deletes in connectors:
+  * Add `hard` to Delete connector API
+  * Add `include_deleted` to Get and List Connector APIs
+* Add `master_timeout` to Migrate to data tiers routing APIs
+* Add `master_timeout` to the Alias exists and Get alias APIs.
+* Add `expand_wildcards` to Create snapshot API
+* Rename incorrect `access_token` to `token` in Logout of OpenID Connect API
+* Add inference APIs: Alibaba Cloud AI Search, Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI, Cohere, Elastic Inference Service (EIS), Elasticsearch, ELSER, Google AI Studio, Google Vertex AI, Hugging Face, Jina AI, Mistral, OpenAI, and Voyage AI
+* Add Elastic Inference Service (EIS) chat completion API
+* Add Reindex legacy backing indices APIs
+* Add Create an index from a source index API
+* Add `include_source_on_error` to Create, Index, Update and Bulk APIs
+* Add Stop async ES|QL query API
+* Add `timeout` to Resolve Cluster API
+* Add `adaptive_allocations` body field to Start and Update a trained model deployment API
+* Rename `index_template_subtitutions` to `index_template_substitutions` in Simulate data ingestion API
+* Add `if_primary_term`, `if_seq_no`, `op_type`, `require_alias` and `require_data_stream` to Create API
+* Add `max_concurrent_shard_requests` to Open point in time API
+* Add `local` and `flat_settings` to Check index templates API
+* Add `reopen` to Update index settings API
+* Add `resource` to Reload search analyzer API
+* Add `lazy` to Roll over to a new index API
+* Add `cause` and `create` to Simulate index template APIs
+
+### DSL
+ * Add `ignore_malformed`, `script`,  `on_script_error` and `time_series_dimension` to Boolean field
+ * Add `index` to GeoShape field
+ * Add `search_inference_id` to SemanticText field
diff -pruN 8.17.2-2/docs/release-notes/known-issues.md 9.2.0-1/docs/release-notes/known-issues.md
--- 8.17.2-2/docs/release-notes/known-issues.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/release-notes/known-issues.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,20 @@
+---
+navigation_title: "Known issues"
+
+---
+
+# Elasticsearch Python Client known issues [elasticsearch-python-client-known-issues]
+
+% Use the following template to add entries to this page.
+
+% :::{dropdown} Title of known issue
+% **Details** 
+% On [Month/Day/Year], a known issue was discovered that [description of known issue].
+
+% **Workaround** 
+% Workaround description.
+
+% **Resolved**
+% On [Month/Day/Year], this issue was resolved.
+
+% :::
\ No newline at end of file
diff -pruN 8.17.2-2/docs/release-notes/toc.yml 9.2.0-1/docs/release-notes/toc.yml
--- 8.17.2-2/docs/release-notes/toc.yml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/release-notes/toc.yml	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,5 @@
+toc:
+  - file: index.md
+  - file: known-issues.md
+  - file: breaking-changes.md
+  - file: deprecations.md
\ No newline at end of file
Binary files 8.17.2-2/docs/sphinx/_static/images/create-api-key.png and 9.2.0-1/docs/sphinx/_static/images/create-api-key.png differ
diff -pruN 8.17.2-2/docs/sphinx/api/indices.rst 9.2.0-1/docs/sphinx/api/indices.rst
--- 8.17.2-2/docs/sphinx/api/indices.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/api/indices.rst	2025-10-28 16:50:53.000000000 +0000
@@ -6,4 +6,4 @@ Indices
    :no-index:
 
 .. autoclass:: IndicesClient
-   :members:
\ No newline at end of file
+   :members:
diff -pruN 8.17.2-2/docs/sphinx/api/project.rst 9.2.0-1/docs/sphinx/api/project.rst
--- 8.17.2-2/docs/sphinx/api/project.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/api/project.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,9 @@
+.. _project:
+
+Project
+-------
+.. py:module:: elasticsearch.client
+   :no-index:
+
+.. autoclass:: ProjectClient
+   :members:
diff -pruN 8.17.2-2/docs/sphinx/api/streams.rst 9.2.0-1/docs/sphinx/api/streams.rst
--- 8.17.2-2/docs/sphinx/api/streams.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/api/streams.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,9 @@
+.. _streams:
+
+Streams
+-------
+.. py:module:: elasticsearch.client
+   :no-index:
+
+.. autoclass:: StreamsClient
+   :members:
diff -pruN 8.17.2-2/docs/sphinx/api.rst 9.2.0-1/docs/sphinx/api.rst
--- 8.17.2-2/docs/sphinx/api.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/api.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,59 +0,0 @@
-.. _api:
-
-Elasticsearch API Reference
-===========================
-
-All the API calls map the raw REST API as closely as possible, including the
-distinction between required and optional arguments to the calls. Keyword
-arguments are required for all calls.
-
-.. note::
-
-   Some API parameters in Elasticsearch are reserved keywords in Python.
-   For example the ``from`` query parameter for pagination would be aliased as
-   ``from_``.
-
-.. toctree::
-   :maxdepth: 1
-
-   api/elasticsearch
-   api/async-search
-   api/autoscaling
-   api/cat
-   api/ccr
-   api/cluster
-   api/connector
-   api/dangling-indices
-   api/enrich-policies
-   api/eql
-   api/esql
-   api/fleet
-   api/graph-explore
-   api/index-lifecycle-management
-   api/indices
-   api/inference
-   api/ingest-pipelines
-   api/license
-   api/logstash
-   api/migration
-   api/ml
-   api/monitoring
-   api/nodes
-   api/query-rules
-   api/rollup-indices
-   api/search-application
-   api/searchable-snapshots
-   api/security
-   api/shutdown
-   api/simulate
-   api/snapshot-lifecycle-management
-   api/snapshots
-   api/snapshottable-features
-   api/sql
-   api/synonyms
-   api/tls-ssl
-   api/tasks
-   api/text-structure
-   api/transforms
-   api/watcher
-   api/x-pack
diff -pruN 8.17.2-2/docs/sphinx/api_helpers.rst 9.2.0-1/docs/sphinx/api_helpers.rst
--- 8.17.2-2/docs/sphinx/api_helpers.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/api_helpers.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,26 @@
+.. _helpers:
+
+Helpers
+=======
+
+.. py:module:: elasticsearch.helpers
+
+Streaming Bulk
+--------------
+.. autofunction:: streaming_bulk
+
+Parallel Bulk
+-------------
+.. autofunction:: parallel_bulk
+
+Bulk
+----
+.. autofunction:: bulk
+
+Scan
+----
+.. autofunction:: scan
+
+Reindex
+-------
+.. autofunction:: reindex
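A minimal usage sketch for the bulk helper documented above, adapted from the example in the removed ``helpers.rst``; the endpoint, API key, and index name are illustrative:

.. code-block:: python

    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import bulk

    client = Elasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

    def gendata():
        # yield one action dict per document to index
        for word in ["foo", "bar", "baz"]:
            yield {"_index": "mywords", "word": word}

    bulk(client, gendata())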
diff -pruN 8.17.2-2/docs/sphinx/async.rst 9.2.0-1/docs/sphinx/async.rst
--- 8.17.2-2/docs/sphinx/async.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/async.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,237 +0,0 @@
-Using asyncio with Elasticsearch
-================================
-
- .. py:module:: elasticsearch
-    :no-index:
-
-The ``elasticsearch`` package supports async/await with
-`asyncio <https://docs.python.org/3/library/asyncio.html>`_ and `aiohttp <https://docs.aiohttp.org>`_.
-You can either install ``aiohttp`` directly or use the ``[async]`` extra:
-
- .. code-block:: bash
-
-    $ python -m pip install elasticsearch aiohttp
-
-    # - OR -
-
-    $ python -m pip install elasticsearch[async]
-
-Getting Started with Async
---------------------------
-
-After installation all async API endpoints are available via :class:`~elasticsearch.AsyncElasticsearch`
-and are used in the same way as other APIs, just with an extra ``await``:
-
- .. code-block:: python
-
-    import asyncio
-    from elasticsearch import AsyncElasticsearch
-
-    client = AsyncElasticsearch()
-
-    async def main():
-        resp = await client.search(
-            index="documents",
-            body={"query": {"match_all": {}}},
-            size=20,
-        )
-        print(resp)
-
-    loop = asyncio.get_event_loop()
-    loop.run_until_complete(main())
-
-All APIs that are available under the sync client are also available under the async client.
-
-ASGI Applications and Elastic APM
----------------------------------
-
-`ASGI <https://asgi.readthedocs.io>`_ (Asynchronous Server Gateway Interface) is a new way to
-serve Python web applications making use of async I/O to achieve better performance.
-Some examples of ASGI frameworks include FastAPI, Django 3.0+, and Starlette.
-If you're using one of these frameworks along with Elasticsearch then you
-should be using :py:class:`~elasticsearch.AsyncElasticsearch` to avoid blocking
-the event loop with synchronous network calls for optimal performance.
-
-`Elastic APM <https://www.elastic.co/guide/en/apm/agent/python/current/index.html>`_
-also supports tracing of async Elasticsearch queries just the same as
-synchronous queries. For an example on how to configure ``AsyncElasticsearch`` with
-a popular ASGI framework `FastAPI <https://fastapi.tiangolo.com/>`_ and APM tracing
-there is a `pre-built example <https://github.com/elastic/elasticsearch-py/tree/master/examples/fastapi-apm>`_
-in the ``examples/fastapi-apm`` directory.
-
-Frequently Asked Questions
---------------------------
-
-ValueError when initializing ``AsyncElasticsearch``?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If when trying to use ``AsyncElasticsearch`` you receive ``ValueError: You must
-have 'aiohttp' installed to use AiohttpHttpNode`` you should ensure that you
-have ``aiohttp`` installed in your environment (check with ``$ python -m pip
-freeze | grep aiohttp``). Otherwise, async support won't be available.
-
-What about the ``elasticsearch-async`` package?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Previously asyncio was supported separately via the `elasticsearch-async <https://github.com/elastic/elasticsearch-py-async>`_
-package. The ``elasticsearch-async`` package has been deprecated in favor of
-``AsyncElasticsearch`` provided by the ``elasticsearch`` package
-in v7.8 and onwards.
-
-Receiving 'Unclosed client session / connector' warning?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This warning is created by ``aiohttp`` when an open HTTP connection is
-garbage collected. You'll typically run into this when closing your application.
-To resolve the issue ensure that :meth:`~elasticsearch.AsyncElasticsearch.close`
-is called before the :py:class:`~elasticsearch.AsyncElasticsearch` instance is garbage collected.
-
-For example if using FastAPI that might look like this:
-
- .. code-block:: python
-
-    import os
-    from contextlib import asynccontextmanager
-
-    from fastapi import FastAPI
-    from elasticsearch import AsyncElasticsearch
-
-    ELASTICSEARCH_URL = os.environ["ELASTICSEARCH_URL"]
-    client = None
-
-    @asynccontextmanager
-    async def lifespan(app: FastAPI):
-        global client
-        client = AsyncElasticsearch(ELASTICSEARCH_URL)
-        yield
-        await client.close()
-
-    app = FastAPI(lifespan=lifespan)
-
-    @app.get("/")
-    async def main():
-        return await client.info()
-
-You can run this example by saving it to ``main.py`` and executing
-``ELASTICSEARCH_URL=http://localhost:9200 uvicorn main:app``.
-
-
-Async Helpers
--------------
-
-Async variants of all helpers are available in ``elasticsearch.helpers``
-and are all prefixed with ``async_*``. You'll notice that these APIs
-are identical to the ones in the sync :ref:`helpers` documentation.
-
-All async helpers that accept an iterator or generator also accept async iterators
-and async generators.
-
- .. py:module:: elasticsearch.helpers
-    :no-index:
-
-Bulk and Streaming Bulk
-~~~~~~~~~~~~~~~~~~~~~~~
-
- .. autofunction:: async_bulk
-
- .. code-block:: python
-
-    import asyncio
-    from elasticsearch import AsyncElasticsearch
-    from elasticsearch.helpers import async_bulk
-
-    client = AsyncElasticsearch()
-
-    async def gendata():
-        mywords = ['foo', 'bar', 'baz']
-        for word in mywords:
-            yield {
-                "_index": "mywords",
-                "doc": {"word": word},
-            }
-
-    async def main():
-        await async_bulk(client, gendata())
-
-    loop = asyncio.get_event_loop()
-    loop.run_until_complete(main())
-
- .. autofunction:: async_streaming_bulk
-
- .. code-block:: python
-
-    import asyncio
-    from elasticsearch import AsyncElasticsearch
-    from elasticsearch.helpers import async_streaming_bulk
-
-    client = AsyncElasticsearch()
-
-    async def gendata():
-        mywords = ['foo', 'bar', 'baz']
-        for word in mywords:
-            yield {
-                "_index": "mywords",
-                "word": word,
-            }
-
-    async def main():
-        async for ok, result in async_streaming_bulk(client, gendata()):
-            action, result = result.popitem()
-            if not ok:
-                print("failed to %s document %s" % ())
-
-    loop = asyncio.get_event_loop()
-    loop.run_until_complete(main())
-
-Scan
-~~~~
-
- .. autofunction:: async_scan
-
- .. code-block:: python
-
-    import asyncio
-    from elasticsearch import AsyncElasticsearch
-    from elasticsearch.helpers import async_scan
-
-    client = AsyncElasticsearch()
-
-    async def main():
-        async for doc in async_scan(
-            client=client,
-            query={"query": {"match": {"title": "python"}}},
-            index="orders-*"
-        ):
-            print(doc)
-
-    loop = asyncio.get_event_loop()
-    loop.run_until_complete(main())
-
-Reindex
-~~~~~~~
-
- .. autofunction:: async_reindex
-
-
-API Reference
--------------
-
- .. py:module:: elasticsearch
-    :no-index:
-
-The API of :class:`~elasticsearch.AsyncElasticsearch` is nearly identical
-to the API of :class:`~elasticsearch.Elasticsearch` with the exception that
-every API call like :py:func:`~elasticsearch.AsyncElasticsearch.search` is
-an ``async`` function and requires an ``await`` to properly return the response
-body.
-
-AsyncElasticsearch
-~~~~~~~~~~~~~~~~~~
-
- .. note::
-
-    To reference Elasticsearch APIs that are namespaced like ``.indices.create()``
-    refer to the sync API reference. These APIs are identical between sync and async.
-
- .. autoclass:: AsyncElasticsearch
-   :members:
diff -pruN 8.17.2-2/docs/sphinx/async_api_helpers.rst 9.2.0-1/docs/sphinx/async_api_helpers.rst
--- 8.17.2-2/docs/sphinx/async_api_helpers.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/async_api_helpers.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,29 @@
+Async Helpers
+=============
+
+Async variants of all helpers are available in ``elasticsearch.helpers``
+and are all prefixed with ``async_*``. You'll notice that these APIs
+are identical to the ones in the sync :ref:`helpers` documentation.
+
+All async helpers that accept an iterator or generator also accept async iterators
+and async generators.
+
+ .. py:module:: elasticsearch.helpers
+    :no-index:
+
+Streaming Bulk
+--------------
+ .. autofunction:: async_streaming_bulk
+
+Bulk
+----
+ .. autofunction:: async_bulk
+
+Scan
+----
+ .. autofunction:: async_scan
+
+Reindex
+-------
+ .. autofunction:: async_reindex
+
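A minimal sketch of the async variant, adapted from the examples in the removed ``async.rst``; the endpoint, API key, and index name are illustrative:

.. code-block:: python

    import asyncio

    from elasticsearch import AsyncElasticsearch
    from elasticsearch.helpers import async_bulk

    client = AsyncElasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

    async def gendata():
        # async helpers also accept async generators
        for word in ["foo", "bar", "baz"]:
            yield {"_index": "mywords", "word": word}

    async def main():
        await async_bulk(client, gendata())
        await client.close()

    asyncio.run(main())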
diff -pruN 8.17.2-2/docs/sphinx/async_dsl.rst 9.2.0-1/docs/sphinx/async_dsl.rst
--- 8.17.2-2/docs/sphinx/async_dsl.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/async_dsl.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,49 @@
+.. _async_dsl:
+
+Async DSL
+=========
+
+.. py:module:: elasticsearch.dsl
+   :no-index:
+
+Search
+------
+.. autoclass:: AsyncSearch
+   :inherited-members:
+   :members:
+
+Multi-Search
+------------
+.. autoclass:: AsyncMultiSearch
+   :inherited-members:
+   :members:
+
+Document
+--------
+.. autoclass:: AsyncDocument
+   :inherited-members:
+   :members:
+
+Index
+-----
+.. autoclass:: AsyncIndex
+   :inherited-members:
+   :members:
+
+Mapping
+-------
+.. autoclass:: AsyncMapping
+   :inherited-members:
+   :members:
+
+Faceted Search
+--------------
+.. autoclass:: AsyncFacetedSearch
+   :inherited-members:
+   :members:
+
+Update by Query
+---------------
+.. autoclass:: AsyncUpdateByQuery
+   :inherited-members:
+   :members:
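A minimal sketch of an ``AsyncSearch`` query executed against an ``AsyncElasticsearch`` client; the endpoint, API key, index, and field names are illustrative:

.. code-block:: python

    from elasticsearch import AsyncElasticsearch
    from elasticsearch.dsl import AsyncSearch

    client = AsyncElasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

    async def run():
        s = AsyncSearch(using=client, index="my-index").query("match", title="python")
        response = await s.execute()
        for hit in response:
            print(hit.meta.score, hit.title)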
diff -pruN 8.17.2-2/docs/sphinx/async_es_api.rst 9.2.0-1/docs/sphinx/async_es_api.rst
--- 8.17.2-2/docs/sphinx/async_es_api.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/async_es_api.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,15 @@
+Async Elasticsearch API
+=======================
+
+ .. py:module:: elasticsearch
+    :no-index:
+
+ .. note::
+
+    To reference Elasticsearch APIs that are namespaced like ``.indices.create()``
+    refer to the sync API reference. These APIs are identical between sync and async.
+
+Elasticsearch
+-------------
+ .. autoclass:: AsyncElasticsearch
+   :members:
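A minimal usage sketch, adapted from the getting-started example in the removed ``async.rst``; the index name is illustrative:

.. code-block:: python

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

    async def main():
        resp = await client.search(index="documents", query={"match_all": {}}, size=20)
        print(resp["hits"]["total"])
        await client.close()

    asyncio.run(main())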
diff -pruN 8.17.2-2/docs/sphinx/dsl.rst 9.2.0-1/docs/sphinx/dsl.rst
--- 8.17.2-2/docs/sphinx/dsl.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/dsl.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,49 @@
+.. _dsl:
+
+DSL
+===
+
+.. py:module:: elasticsearch.dsl
+
+Search
+------
+.. autoclass:: Search
+   :inherited-members:
+   :members:
+
+Multi-Search
+------------
+.. autoclass:: MultiSearch
+   :inherited-members:
+   :members:
+
+Document
+--------
+.. autoclass:: Document
+   :inherited-members:
+   :members:
+
+Index
+-----
+.. autoclass:: Index
+   :inherited-members:
+   :members:
+
+Mapping
+-------
+.. autoclass:: Mapping
+   :inherited-members:
+   :members:
+
+Faceted Search
+--------------
+.. autoclass:: FacetedSearch
+   :inherited-members:
+   :members:
+
+Update by Query
+---------------
+.. autoclass:: UpdateByQuery
+   :inherited-members:
+   :members:
+
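A minimal sketch of composing and running a ``Search``; the endpoint, API key, index, and field names are illustrative:

.. code-block:: python

    from elasticsearch import Elasticsearch
    from elasticsearch.dsl import Search

    client = Elasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

    s = (
        Search(using=client, index="my-index")
        .query("match", title="python")
        .filter("term", published=True)
    )
    for hit in s.execute():
        print(hit.meta.score, hit.title)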
diff -pruN 8.17.2-2/docs/sphinx/es_api.rst 9.2.0-1/docs/sphinx/es_api.rst
--- 8.17.2-2/docs/sphinx/es_api.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/es_api.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,61 @@
+.. _api:
+
+Elasticsearch API
+=================
+
+All the API calls map the raw REST API as closely as possible, including the
+distinction between required and optional arguments to the calls. Keyword
+arguments are required for all calls.
+
+.. note::
+
+   Some API parameters in Elasticsearch are reserved keywords in Python.
+   For example the ``from`` query parameter for pagination would be aliased as
+   ``from_``.
+
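For example, paginating with the reserved ``from`` keyword uses the aliased parameter (index name is illustrative):

.. code-block:: python

    client.search(index="my-index", query={"match_all": {}}, from_=10, size=10)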
+.. toctree::
+   :maxdepth: 1
+
+   api/elasticsearch
+   api/async-search
+   api/autoscaling
+   api/cat
+   api/ccr
+   api/cluster
+   api/connector
+   api/dangling-indices
+   api/enrich-policies
+   api/eql
+   api/esql
+   api/fleet
+   api/graph-explore
+   api/index-lifecycle-management
+   api/indices
+   api/inference
+   api/ingest-pipelines
+   api/license
+   api/logstash
+   api/migration
+   api/ml
+   api/monitoring
+   api/nodes
+   api/project
+   api/query-rules
+   api/rollup-indices
+   api/search-application
+   api/searchable-snapshots
+   api/security
+   api/shutdown
+   api/simulate
+   api/snapshot-lifecycle-management
+   api/snapshots
+   api/snapshottable-features
+   api/sql
+   api/streams
+   api/synonyms
+   api/tls-ssl
+   api/tasks
+   api/text-structure
+   api/transforms
+   api/watcher
+   api/x-pack
diff -pruN 8.17.2-2/docs/sphinx/esql.rst 9.2.0-1/docs/sphinx/esql.rst
--- 8.17.2-2/docs/sphinx/esql.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/docs/sphinx/esql.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,100 @@
+ES|QL Query Builder
+===================
+
+Commands
+--------
+
+.. autoclass:: elasticsearch.esql.ESQL
+   :inherited-members:
+   :members:
+
+.. autoclass:: elasticsearch.esql.esql.ESQLBase
+   :inherited-members:
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.From
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Row
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Show
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.ChangePoint
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Completion
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Dissect
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Drop
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Enrich
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Eval
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Fork
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Grok
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Keep
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Limit
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.LookupJoin
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.MvExpand
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Rename
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Sample
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Sort
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Stats
+   :members:
+   :exclude-members: __init__
+
+.. autoclass:: elasticsearch.esql.esql.Where
+   :members:
+   :exclude-members: __init__
+
+Functions
+---------
+
+.. automodule:: elasticsearch.esql.functions
+   :members:
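A minimal sketch of composing a query with the builder classes documented above and running it through the client; the index and column names are illustrative, and rendering the query via ``str()`` is an assumption:

.. code-block:: python

    from elasticsearch import Elasticsearch
    from elasticsearch.esql import ESQL

    client = Elasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

    # FROM employees | KEEP first_name, last_name, emp_no | SORT emp_no | LIMIT 5
    query = (
        ESQL.from_("employees")
        .keep("first_name", "last_name", "emp_no")
        .sort("emp_no")
        .limit(5)
    )

    resp = client.esql.query(query=str(query))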
diff -pruN 8.17.2-2/docs/sphinx/helpers.rst 9.2.0-1/docs/sphinx/helpers.rst
--- 8.17.2-2/docs/sphinx/helpers.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/helpers.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,146 +0,0 @@
-.. _helpers:
-
-Helpers
-=======
-
-Collection of simple helper functions that abstract some specifics of the raw API.
-
-Connecting
-----------
-
-.. code-block:: python
-
-    from elasticsearch import Elasticsearch
-    
-    client = Elasticsearch("https://.../", api_key="YOUR_API_KEY")
-
-
-Bulk helpers
-------------
-
-There are several helpers for the ``bulk`` API since its requirement for
-specific formatting and other considerations can make it cumbersome if used directly.
-
-All bulk helpers accept an instance of ``Elasticsearch`` class and an iterable
-``actions`` (any iterable, can also be a generator, which is ideal in most
-cases since it will allow you to index large datasets without the need of
-loading them into memory).
-
-The items in the ``action`` iterable should be the documents we wish to index
-in several formats. The most common one is the same  as returned by
-:meth:`~elasticsearch.Elasticsearch.search`, for example:
-
-.. code:: python
-
-    {
-        '_index': 'index-name',
-        '_id': 42,
-        '_routing': 5,
-        'pipeline': 'my-ingest-pipeline',
-        '_source': {
-            "title": "Hello World!",
-            "body": "..."
-        }
-    }
-
-Alternatively, if `_source` is not present, it will pop all metadata fields
-from the doc and use the rest as the document data:
-
-.. code:: python
-
-    {
-        "_id": 42,
-        "_routing": 5,
-        "title": "Hello World!",
-        "body": "..."
-    }
-
-The :meth:`~elasticsearch.Elasticsearch.bulk` api accepts ``index``, ``create``,
-``delete``, and ``update`` actions. Use the ``_op_type`` field to specify an
-action (``_op_type`` defaults to ``index``):
-
-.. code:: python
-
-    {
-        '_op_type': 'delete',
-        '_index': 'index-name',
-        '_id': 42,
-    }
-    {
-        '_op_type': 'update',
-        '_index': 'index-name',
-        '_id': 42,
-        'doc': {'question': 'The life, universe and everything.'}
-    }
-
-
-Example:
-~~~~~~~~
-
-Lets say we have an iterable of data. Lets say a list of words called ``mywords``
-and we want to index those words into individual documents where the structure of the
-document is like ``{"word": "<myword>"}``.
-
-.. code:: python
-
-    from elasticsearch.helpers import bulk
-
-    def gendata():
-        mywords = ['foo', 'bar', 'baz']
-        for word in mywords:
-            yield {
-                "_index": "mywords",
-                "word": word,
-            }
-
-    bulk(client, gendata())
-
-
-For a more complete and complex example please take a look at
-https://github.com/elastic/elasticsearch-py/blob/main/examples/bulk-ingest
-
-The :meth:`~elasticsearch.Elasticsearch.parallel_bulk` api is a wrapper around the :meth:`~elasticsearch.Elasticsearch.bulk` api to provide threading. :meth:`~elasticsearch.Elasticsearch.parallel_bulk` returns a generator which must be consumed to produce results.
-
-To see the results use:
-
-.. code:: python
-
-    for success, info in parallel_bulk(...):
-    if not success:
-        print('A document failed:', info)
-        
-If you don't care about the results, you can use deque from collections:
-
-.. code:: python
-
-    from collections import deque
-    deque(parallel_bulk(...), maxlen=0)
-
-.. note::
-
-    When reading raw json strings from a file, you can also pass them in
-    directly (without decoding to dicts first). In that case, however, you lose
-    the ability to specify anything (index, op_type and even id) on a per-record
-    basis, all documents will just be sent to elasticsearch to be indexed
-    as-is.
-
-
-.. py:module:: elasticsearch.helpers
-
-.. autofunction:: streaming_bulk
-
-.. autofunction:: parallel_bulk
-
-.. autofunction:: bulk
-
-
-Scan
-----
-
-.. autofunction:: scan
-
-
-Reindex
--------
-
-.. autofunction:: reindex
diff -pruN 8.17.2-2/docs/sphinx/index.rst 9.2.0-1/docs/sphinx/index.rst
--- 8.17.2-2/docs/sphinx/index.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/index.rst	2025-10-28 16:50:53.000000000 +0000
@@ -1,140 +1,29 @@
 Python Elasticsearch Client
 ===========================
 
-Official low-level client for Elasticsearch. Its goal is to provide common
-ground for all Elasticsearch-related code in Python; because of this it tries
-to be opinion-free and very extendable.
+Welcome to the API documentation of the official Python client for Elasticsearch!
+The goal of this client is to provide common ground for all Elasticsearch-related
+code in Python; because of this it tries to be opinion-free and very extendable.
 
-`Download the latest version of Elasticsearch <https://www.elastic.co/downloads/elasticsearch>`_
-or
-`sign-up <https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page>`_
-for a free trial of Elastic Cloud.
-
-
-Installation
-------------
-
-Install the ``elasticsearch`` package with `pip
-<https://pypi.org/project/elasticsearch>`_:
-
-.. code-block:: console
-
-    $ python -m pip install elasticsearch
-
-If your application uses async/await in Python you can install with
-the ``async`` extra:
-
-.. code-block:: console
-
-    $ python -m pip install elasticsearch[async]
-
-Read more about `how to use asyncio with this project <async.html>`_.
-
-
-Compatibility
--------------
-
-Language clients are forward compatible; meaning that the clients support
-communicating with greater or equal minor versions of Elasticsearch without
-breaking. It does not mean that the clients automatically support new features
-of newer Elasticsearch versions; it is only possible after a release of a new
-client version. For example, a 8.12 client version won't automatically support
-the new features of the 8.13 version of Elasticsearch, the 8.13 client version
-is required for that. Elasticsearch language clients are only backwards
-compatible with default distributions and without guarantees made.
-
-+-----------------------+-------------------------+-----------+
-| Elasticsearch version | elasticsearch-py branch | Supported |
-+=======================+=========================+===========+
-| main                  | main                    |           |
-+-----------------------+-------------------------+-----------+
-| 8.x                   | 8.x                     | 8.x       |
-+-----------------------+-------------------------+-----------+
-| 7.x                   | 7.x                     | 7.17      |
-+-----------------------+-------------------------+-----------+
-
-If you need multiple versions installed at the same time, versions are
-also released, such as ``elasticsearch7`` and ``elasticsearch8``.
-
-
-Example Usage
--------------
-
-.. code-block:: python
-
-    from datetime import datetime
-    from elasticsearch import Elasticsearch
-
-    client = Elasticsearch("http://localhost:9200/", api_key="YOUR_API_KEY")
-
-    doc = {
-        "author": "kimchy",
-        "text": "Elasticsearch: cool. bonsai cool.",
-        "timestamp": datetime.now(),
-    }
-    resp = client.index(index="test-index", id=1, document=doc)
-    print(resp["result"])
-
-    resp = client.get(index="test-index", id=1)
-    print(resp["_source"])
-
-    client.indices.refresh(index="test-index")
-
-    resp = client.search(index="test-index", query={"match_all": {}})
-    print("Got {} hits:".format(resp["hits"]["total"]["value"]))
-    for hit in resp["hits"]["hits"]:
-        print("{timestamp} {author} {text}".format(**hit["_source"]))
-
-See more examples in the :ref:`quickstart` page.
-
-
-
-Features
---------
-
-This client was designed as very thin wrapper around Elasticsearch's REST API to
-allow for maximum flexibility. This means that there are no opinions in this
-client; it also means that some of the APIs are a little cumbersome to use from
-Python. We have created some :ref:`helpers` to help with this issue as well as
-a more high level library (`elasticsearch-dsl`_) on top of this one to provide
-a more convenient way of working with Elasticsearch.
-
-
-
-Elasticsearch-DSL
------------------
-
-For a more high level client library with more limited scope, have a look at
-`elasticsearch-dsl`_ - a more pythonic library sitting on top of
-``elasticsearch-py``.
-
-`elasticsearch-dsl`_ provides a more convenient and idiomatic way to write and manipulate
-`queries`_ by mirroring the terminology and structure of Elasticsearch JSON DSL
-while exposing the whole range of the DSL from Python
-either directly using defined classes or a queryset-like expressions.
-
-It also provides an optional `persistence layer`_ for working with documents as
-Python objects in an ORM-like fashion: defining mappings, retrieving and saving
-documents, wrapping the document data in user-defined classes.
-
-.. _elasticsearch-dsl: https://elasticsearch-dsl.readthedocs.io/
-.. _queries: https://elasticsearch-dsl.readthedocs.io/en/latest/search_dsl.html
-.. _persistence layer: https://elasticsearch-dsl.readthedocs.io/en/latest/persistence.html#doctype
+High-level documentation for this client is `also available <https://www.elastic.co/docs/reference/elasticsearch/clients/python>`_.
 
+.. toctree::
+   :maxdepth: 2 
 
-Contents
---------
+   es_api
+   esql
+   dsl
+   api_helpers
+   exceptions
 
 .. toctree::
-   :maxdepth: 3
+   :caption: Async
+   :maxdepth: 2
+
+   async_es_api
+   async_dsl
+   async_api_helpers
 
-   quickstart
-   interactive
-   api
-   exceptions
-   async
-   helpers
-   Release Notes <https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html>
 
 License
 -------
diff -pruN 8.17.2-2/docs/sphinx/interactive.rst 9.2.0-1/docs/sphinx/interactive.rst
--- 8.17.2-2/docs/sphinx/interactive.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/interactive.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-.. _interactive:
-
-Interactive examples 
-====================
-
-The `elasticsearch-labs <https://github.com/elastic/elasticsearch-labs>`_ repo contains interactive and executable `Python notebooks <https://github.com/elastic/elasticsearch-labs/tree/main/notebooks>`_, sample apps, and resources for testing out Elasticsearch, using the Python client.
-These examples are mainly focused on vector search, hybrid search and generative AI use cases, but you'll also find examples of basic operations like creating index mappings and performing lexical search.
-
-Search notebooks
-----------------
-
-The `Search <https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/search>`_ folder is a good place to start if you're new to Elasticsearch.
-This folder contains a number of notebooks that demonstrate the fundamentals of Elasticsearch, like indexing vectors, running lexical, semantic and *hybrid* searches, and more.
-
-The following notebooks are available:
-
-0. `Quick start <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb>`_
-1. `Keyword, querying, filtering <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb>`_
-2. `Hybrid search <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb>`_
-3. `Semantic search with ELSER <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb>`_
-4. `Multilingual semantic search <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb>`_
-5. `Query rules <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb>`_
-6. `Synonyms API quick start <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb>`_
-
-Here's a brief overview of what you'll learn in each notebook.
-
-Quick start
-^^^^^^^^^^^
-
-In  the `00-quick-start.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb>`_ notebook you'll learn how to:
-
-- Use the Elasticsearch Python client for various operations.
-- Create and define an index for a sample dataset with ``dense_vector`` fields.
-- Transform book titles into embeddings using `Sentence Transformers <https://www.sbert.net>`_ and index them into Elasticsearch.
-- Perform k-nearest neighbors (knn) semantic searches.
-- Integrate traditional text-based search with semantic search, for a hybrid search system.
-- Use reciprocal rank fusion (RRF) to intelligently combine search results from different retrieval systems.
-
-Keyword, querying, filtering
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In the `01-keyword-querying-filtering.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb>`_ notebook, you'll learn how to:
-
-- Use `query and filter contexts <https://www.elastic.co/guide/en/elasticsearch/reference/current/query-filter-context.html>`_ to search and filter documents in Elasticsearch.
-- Execute full-text searches with ``match`` and ``multi-match`` queries.
-- Query and filter documents based on ``text``, ``number``, ``date``, or ``boolean`` values.
-- Run multi-field searches using the ``multi-match`` query.
-- Prioritize specific fields in the ``multi-match`` query for tailored results.
-
-Hybrid search
-^^^^^^^^^^^^^
-
-In the `02-hybrid-search.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb>`_ notebook, you'll learn how to:
-
-- Combine results of traditional text-based search with semantic search, for a hybrid search system.
-- Transform fields in the sample dataset into embeddings using the Sentence Transformer model and index them into Elasticsearch.
-- Use the `RRF API <https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html#rrf-api>`_ to combine the results of a ``match`` query and a ``kNN`` semantic search.
-- Walk through a super simple toy example that demonstrates, step by step, how RRF ranking works.
-
-Semantic search with ELSER
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In the `03-ELSER.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb>`_ notebook, you'll learn how to:
-
-- Use the Elastic Learned Sparse Encoder (ELSER) for text expansion-powered semantic search, out of the box — without training, fine-tuning, or embeddings generation.
-- Download and deploy the ELSER model in your Elastic environment.
-- Create an Elasticsearch index named `search-movies` with specific mappings and index a dataset of movie descriptions.
-- Create an ingest pipeline containing an inference processor for ELSER model execution.
-- Reindex the data from `search-movies` into another index, `elser-movies`, using the ELSER pipeline for text expansion.
-- Observe the results of running the documents through the model by inspecting the additional terms it adds to documents, which enhance searchability.
-- Perform simple keyword searches on the `elser-movies` index to assess the impact of ELSER's text expansion.
-- Execute ELSER-powered semantic searches using the ``text_expansion`` query.
-
-Multilingual semantic search
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In the `04-multilingual.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb>`_ notebook, you'll learn how to:
-
-- Use a multilingual embedding model for semantic search across languages.
-- Transform fields in the sample dataset into embeddings using the Sentence Transformer model and index them into Elasticsearch.
-- Use filtering with a ``kNN`` semantic search.
-- Walk through a super simple toy example that demonstrates, step by step, how multilingual search works across languages, and within non-English languages.
-
-Query rules
-^^^^^^^^^^^
-
-In the `05-query-rules.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb>`_ notebook, you'll learn how to:
-
-- Use the query rules management APIs to create and edit promotional rules based on contextual queries.
-- Apply these query rules by using the ``rule_query`` in Query DSL.
-
-Synonyms API quick start
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-In the `06-synonyms-api.ipynb <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb>`_ notebook, you'll learn how to:
-
-- Use the synonyms management API to create a synonyms set to enhance your search recall.
-- Configure an index to use search-time synonyms.
-- Update synonyms in real time.
-- Run queries that are enhanced by synonyms.
-
-Other notebooks
----------------
-
-- `Generative AI <https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/generative-ai>`_. Notebooks that demonstrate various use cases for Elasticsearch as the retrieval engine and vector store for LLM-powered applications.
-- `Integrations <https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations>`_. Notebooks that demonstrate how to integrate popular services and projects with Elasticsearch, including OpenAI, Hugging Face, and LlamaIndex
-- `Langchain <https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/langchain>`_. Notebooks that demonstrate how to integrate Elastic with LangChain, a framework for developing applications powered by language models.
diff -pruN 8.17.2-2/docs/sphinx/quickstart.rst 9.2.0-1/docs/sphinx/quickstart.rst
--- 8.17.2-2/docs/sphinx/quickstart.rst	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/docs/sphinx/quickstart.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,167 +0,0 @@
-.. _quickstart:
-
-Quickstart
-==========
-
-This guide shows you how to install the Elasticsearch Python client and perform basic
-operations like indexing or searching documents.
-
-Requirements
-------------
-
-- `Python <https://www.python.org/>`_ 3.8 or newer
-- `pip <https://pip.pypa.io/en/stable/>`_
-
-
-Installation
-------------
-
-To install the client, run the following command:
-
-.. code-block:: console
-
-    $ python -m pip install elasticsearch
-
-
-Connecting
-----------
-
-You can connect to Elastic Cloud using an API key and the Cloud ID.
-
-.. code-block:: python
-
-    from elasticsearch import Elasticsearch
-
-    client = Elasticsearch(cloud_id="YOUR_CLOUD_ID", api_key="YOUR_API_KEY")
-
-Your Cloud ID can be found on the **My deployment** page of your deployment 
-under **Cloud ID**.
-
-You can generate an API key on the **Management** page under Security.
-
-.. image:: ../guide/images/create-api-key.png
-
-Confirm that the connection was successful.
-
-.. code-block:: python
-
-    print(client.info())
-
-Using the client
-----------------
-
-Time to use Elasticsearch! This section walks you through the most important 
-operations of Elasticsearch. The following examples assume that the Python 
-client was instantiated as above.
-
-Create an index with mappings
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-This is how you create the `my_index` index.
-Optionally, you can first define the expected types of your features with a custom mapping.
-
-.. code-block:: python
-
-    mappings = {
-        "properties": {
-            "foo": {"type": "text"},
-            "bar": {
-                "type": "text",
-                "fields": {
-                    "keyword": {
-                        "type": "keyword",
-                        "ignore_above": 256,
-                    }
-                },
-            },
-        }
-    }
-
-    client.indices.create(index="my_index", mappings=mappings)
-
-Indexing documents
-^^^^^^^^^^^^^^^^^^
-
-This indexes a document with the index API:
-
-.. code-block:: python
-
-    client.index(
-        index="my_index",
-        id="my_document_id",
-        document={
-            "foo": "foo",
-            "bar": "bar",
-        },
-    )
-
-You can also index multiple documents at once with the bulk helper function:
-
-.. code-block:: python
-
-    from elasticsearch import helpers
-
-    def generate_docs():
-        for i in range(10):
-            yield {
-                "_index": "my_index",
-                "foo": f"foo {i}",
-                "bar": "bar",
-            }
-            
-    helpers.bulk(client, generate_docs())
-
-These helpers are the recommended way to perform bulk ingestion. While it is also possible to perform bulk ingestion using ``client.bulk`` directly, the helpers handle retries, ingesting chunk by chunk and more. See the :ref:`helpers` page for more details.
-
-Getting documents
-^^^^^^^^^^^^^^^^^
-
-You can get documents by using the following code:
-
-.. code-block:: python
-    
-    client.get(index="my_index", id="my_document_id")
-
-
-Searching documents
-^^^^^^^^^^^^^^^^^^^
-
-This is how you can create a single match query with the Python client: 
-
-
-.. code-block:: python
-
-    client.search(index="my_index", query={"match": {"foo": {"query": "foo"}}})
-
-
-Updating documents
-^^^^^^^^^^^^^^^^^^
-
-This is how you can update a document, for example to add a new field:
-
-.. code-block:: python
-
-    client.update(
-        index="my_index",
-        id="my_document_id",
-        doc={
-            "foo": "bar",
-            "new_field": "new value",
-        },
-    )
-
-
-Deleting documents
-^^^^^^^^^^^^^^^^^^
-
-.. code-block:: python
-    
-    client.delete(index="my_index", id="my_document_id")
-
-
-Deleting an index
-^^^^^^^^^^^^^^^^^
-
-.. code-block:: python
-    
-    client.indices.delete(index="my_index")
diff -pruN 8.17.2-2/elasticsearch/__init__.py 9.2.0-1/elasticsearch/__init__.py
--- 8.17.2-2/elasticsearch/__init__.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -28,11 +28,11 @@ from ._version import __versionstr__
 
 # Ensure that a compatible version of elastic-transport is installed.
 _version_groups = tuple(int(x) for x in re.search(r"^(\d+)\.(\d+)\.(\d+)", _elastic_transport_version).groups())  # type: ignore[union-attr]
-if _version_groups < (8, 0, 0) or _version_groups > (9, 0, 0):
+if _version_groups < (9, 1, 0) or _version_groups > (10, 0, 0):
     raise ImportError(
         "An incompatible version of elastic-transport is installed. Must be between "
-        "v8.0.0 and v9.0.0. Install the correct version with the following command: "
-        "$ python -m pip install 'elastic-transport>=8, <9'"
+        "v9.1.0 and v10.0.0. Install the correct version with the following command: "
+        "$ python -m pip install 'elastic-transport>=9.1, <10'"
     )
 
 _version_groups = re.search(r"^(\d+)\.(\d+)\.(\d+)", __versionstr__).groups()  # type: ignore[assignment, union-attr]
diff -pruN 8.17.2-2/elasticsearch/_async/client/__init__.py 9.2.0-1/elasticsearch/_async/client/__init__.py
--- 8.17.2-2/elasticsearch/_async/client/__init__.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -18,7 +18,6 @@
 
 import logging
 import typing as t
-import warnings
 
 from elastic_transport import (
     AsyncTransport,
@@ -64,6 +63,7 @@ from .migration import MigrationClient
 from .ml import MlClient
 from .monitoring import MonitoringClient
 from .nodes import NodesClient
+from .project import ProjectClient
 from .query_rules import QueryRulesClient
 from .rollup import RollupClient
 from .search_application import SearchApplicationClient
@@ -75,6 +75,7 @@ from .slm import SlmClient
 from .snapshot import SnapshotClient
 from .sql import SqlClient
 from .ssl import SslClient
+from .streams import StreamsClient
 from .synonyms import SynonymsClient
 from .tasks import TasksClient
 from .text_structure import TextStructureClient
@@ -181,37 +182,13 @@ class AsyncElasticsearch(BaseClient):
             t.Callable[[t.Dict[str, t.Any], NodeConfig], t.Optional[NodeConfig]]
         ] = None,
         meta_header: t.Union[DefaultType, bool] = DEFAULT,
-        timeout: t.Union[DefaultType, None, float] = DEFAULT,
-        randomize_hosts: t.Union[DefaultType, bool] = DEFAULT,
-        host_info_callback: t.Optional[
-            t.Callable[
-                [t.Dict[str, t.Any], t.Dict[str, t.Union[str, int]]],
-                t.Optional[t.Dict[str, t.Union[str, int]]],
-            ]
-        ] = None,
-        sniffer_timeout: t.Union[DefaultType, None, float] = DEFAULT,
-        sniff_on_connection_fail: t.Union[DefaultType, bool] = DEFAULT,
         http_auth: t.Union[DefaultType, t.Any] = DEFAULT,
-        maxsize: t.Union[DefaultType, int] = DEFAULT,
         # Internal use only
         _transport: t.Optional[AsyncTransport] = None,
     ) -> None:
         if hosts is None and cloud_id is None and _transport is None:
             raise ValueError("Either 'hosts' or 'cloud_id' must be specified")
 
-        if timeout is not DEFAULT:
-            if request_timeout is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'timeout' and 'request_timeout', "
-                    "instead only specify 'request_timeout'"
-                )
-            warnings.warn(
-                "The 'timeout' parameter is deprecated in favor of 'request_timeout'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            request_timeout = timeout
-
         if serializer is not None:
             if serializers is not DEFAULT:
                 raise ValueError(
@@ -220,58 +197,6 @@ class AsyncElasticsearch(BaseClient):
                 )
             serializers = {default_mimetype: serializer}
 
-        if randomize_hosts is not DEFAULT:
-            if randomize_nodes_in_pool is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'randomize_hosts' and 'randomize_nodes_in_pool', "
-                    "instead only specify 'randomize_nodes_in_pool'"
-                )
-            warnings.warn(
-                "The 'randomize_hosts' parameter is deprecated in favor of 'randomize_nodes_in_pool'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            randomize_nodes_in_pool = randomize_hosts
-
-        if sniffer_timeout is not DEFAULT:
-            if min_delay_between_sniffing is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'sniffer_timeout' and 'min_delay_between_sniffing', "
-                    "instead only specify 'min_delay_between_sniffing'"
-                )
-            warnings.warn(
-                "The 'sniffer_timeout' parameter is deprecated in favor of 'min_delay_between_sniffing'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            min_delay_between_sniffing = sniffer_timeout
-
-        if sniff_on_connection_fail is not DEFAULT:
-            if sniff_on_node_failure is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'sniff_on_connection_fail' and 'sniff_on_node_failure', "
-                    "instead only specify 'sniff_on_node_failure'"
-                )
-            warnings.warn(
-                "The 'sniff_on_connection_fail' parameter is deprecated in favor of 'sniff_on_node_failure'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            sniff_on_node_failure = sniff_on_connection_fail
-
-        if maxsize is not DEFAULT:
-            if connections_per_node is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'maxsize' and 'connections_per_node', "
-                    "instead only specify 'connections_per_node'"
-                )
-            warnings.warn(
-                "The 'maxsize' parameter is deprecated in favor of 'connections_per_node'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            connections_per_node = maxsize
-
         # Setting min_delay_between_sniffing=True implies sniff_before_requests=True
         if min_delay_between_sniffing is not DEFAULT:
             sniff_before_requests = True
@@ -293,22 +218,7 @@ class AsyncElasticsearch(BaseClient):
             )
 
         sniff_callback = None
-        if host_info_callback is not None:
-            if sniffed_node_callback is not None:
-                raise ValueError(
-                    "Can't specify both 'host_info_callback' and 'sniffed_node_callback', "
-                    "instead only specify 'sniffed_node_callback'"
-                )
-            warnings.warn(
-                "The 'host_info_callback' parameter is deprecated in favor of 'sniffed_node_callback'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-
-            sniff_callback = create_sniff_callback(
-                host_info_callback=host_info_callback
-            )
-        elif sniffed_node_callback is not None:
+        if sniffed_node_callback is not None:
             sniff_callback = create_sniff_callback(
                 sniffed_node_callback=sniffed_node_callback
             )
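For reference, a minimal sketch of constructing the client with the 9.x parameter names that replace the aliases removed above; the endpoint, credentials, and values are illustrative:

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch(
        "https://localhost:9200",
        api_key="YOUR_API_KEY",
        request_timeout=10,             # replaces the removed `timeout`
        randomize_nodes_in_pool=True,   # replaces `randomize_hosts`
        min_delay_between_sniffing=60,  # replaces `sniffer_timeout`
        sniff_on_node_failure=True,     # replaces `sniff_on_connection_fail`
        connections_per_node=10,        # replaces `maxsize`
    )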
@@ -460,6 +370,7 @@ class AsyncElasticsearch(BaseClient):
         self.migration = MigrationClient(self)
         self.ml = MlClient(self)
         self.monitoring = MonitoringClient(self)
+        self.project = ProjectClient(self)
         self.query_rules = QueryRulesClient(self)
         self.rollup = RollupClient(self)
         self.search_application = SearchApplicationClient(self)
@@ -470,6 +381,7 @@ class AsyncElasticsearch(BaseClient):
         self.shutdown = ShutdownClient(self)
         self.sql = SqlClient(self)
         self.ssl = SslClient(self)
+        self.streams = StreamsClient(self)
         self.synonyms = SynonymsClient(self)
         self.text_structure = TextStructureClient(self)
         self.transform = TransformClient(self)
@@ -628,6 +540,7 @@ class AsyncElasticsearch(BaseClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        include_source_on_error: t.Optional[bool] = None,
         list_executed_pipelines: t.Optional[bool] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
@@ -699,6 +612,7 @@ class AsyncElasticsearch(BaseClient):
           <li>JavaScript: Check out <code>client.helpers.*</code></li>
           <li>.NET: Check out <code>BulkAllObservable</code></li>
           <li>PHP: Check out bulk indexing.</li>
+          <li>Ruby: Check out <code>Elasticsearch::Helpers::BulkHelper</code></li>
           </ul>
           <p><strong>Submitting bulk requests with cURL</strong></p>
           <p>If you're providing text file input to <code>curl</code>, you must use the <code>--data-binary</code> flag instead of plain <code>-d</code>.
@@ -728,13 +642,17 @@ class AsyncElasticsearch(BaseClient):
           Imagine a <code>_bulk?refresh=wait_for</code> request with three documents in it that happen to be routed to different shards in an index with five shards.
           The request will only wait for those three shards to refresh.
           The other two shards that make up the index do not participate in the <code>_bulk</code> request at all.</p>
+          <p>You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests.
+          Refer to the linked documentation for step-by-step instructions using the index settings API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-bulk.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk>`_
 
         :param operations:
         :param index: The name of the data stream, index, or index alias to perform bulk
             actions on.
+        :param include_source_on_error: If `true`, the document source is included in
+            the error message if parsing errors occur.
         :param list_executed_pipelines: If `true`, the response will include the ingest
             pipelines that were run for each index or create.
         :param pipeline: The pipeline identifier to use to preprocess incoming documents.
@@ -792,6 +710,8 @@ class AsyncElasticsearch(BaseClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if list_executed_pipelines is not None:
             __query["list_executed_pipelines"] = list_executed_pipelines
         if pipeline is not None:
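A minimal sketch of the new flag on a bulk call with the async client; the index and documents are illustrative:

.. code-block:: python

    resp = await client.bulk(
        operations=[
            {"index": {"_index": "my-index", "_id": "1"}},
            {"title": "Hello World!"},
        ],
        include_source_on_error=True,
    )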
@@ -851,7 +771,7 @@ class AsyncElasticsearch(BaseClient):
           Clear the search context and results for a scrolling search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-scroll-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll>`_
 
         :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`.
         """
@@ -908,7 +828,7 @@ class AsyncElasticsearch(BaseClient):
           However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/point-in-time-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time>`_
 
         :param id: The ID of the point-in-time.
         """
@@ -929,11 +849,7 @@ class AsyncElasticsearch(BaseClient):
         if not __body:
             if id is not None:
                 __body["id"] = id
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "DELETE",
             __path,
@@ -973,6 +889,7 @@ class AsyncElasticsearch(BaseClient):
         min_score: t.Optional[float] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         q: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         routing: t.Optional[str] = None,
@@ -992,7 +909,7 @@ class AsyncElasticsearch(BaseClient):
           This means that replicas increase the scalability of the count.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-count.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams and indices,
@@ -1006,8 +923,8 @@ class AsyncElasticsearch(BaseClient):
             This parameter can be used only when the `q` query string parameter is specified.
         :param analyzer: The analyzer to use for the query string. This parameter can
             be used only when the `q` query string parameter is specified.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as a default when no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -1027,6 +944,10 @@ class AsyncElasticsearch(BaseClient):
             in the result.
         :param preference: The node or shard the operation should be performed on. By
             default, it is random.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param q: The query in Lucene query string syntax. This parameter cannot be used
             with a request body.
         :param query: Defines the search query using Query DSL. A request body query
@@ -1079,6 +1000,8 @@ class AsyncElasticsearch(BaseClient):
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if q is not None:
             __query["q"] = q
         if routing is not None:
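A minimal sketch of a count call with the async client, including the new ``project_routing`` parameter (serverless only); the index, query, and project alias are illustrative:

.. code-block:: python

    resp = await client.count(
        index="my-index",
        query={"match": {"title": "python"}},
        project_routing="_alias:my-project",
    )
    print(resp["count"])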
@@ -1116,16 +1039,19 @@ class AsyncElasticsearch(BaseClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        include_source_on_error: t.Optional[bool] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         refresh: t.Optional[
             t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
         ] = None,
+        require_alias: t.Optional[bool] = None,
+        require_data_stream: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
@@ -1165,7 +1091,7 @@ class AsyncElasticsearch(BaseClient):
           This does come at the (very minimal) cost of an additional document parsing pass.
           If the <code>_routing</code> mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.</p>
           <p>NOTE: Data streams do not support custom routing unless they were created with the <code>allow_custom_routing</code> setting enabled in the template.</p>
-          <p>** Distributed**</p>
+          <p><strong>Distributed</strong></p>
           <p>The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
           After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.</p>
           <p><strong>Active shards</strong></p>
@@ -1188,7 +1114,7 @@ class AsyncElasticsearch(BaseClient):
           The <code>_shards</code> section of the API response reveals the number of shard copies on which replication succeeded and failed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-index_.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create>`_
 
         :param index: The name of the data stream or index to target. If the target doesn't
             exist and matches the name or wildcard (`*`) pattern of an index template
@@ -1198,6 +1124,8 @@ class AsyncElasticsearch(BaseClient):
         :param id: A unique identifier for the document. To automatically generate a
             document ID, use the `POST /<target>/_doc/` request format.
         :param document:
+        :param include_source_on_error: If `true`, the document source is included
+            in the error message if there is a parsing error.
         :param pipeline: The ID of the pipeline to use to preprocess incoming documents.
             If the index has a default ingest pipeline specified, setting the value to
             `_none` turns off the default ingest pipeline for this request. If a final
@@ -1206,6 +1134,9 @@ class AsyncElasticsearch(BaseClient):
         :param refresh: If `true`, Elasticsearch refreshes the affected shards to make
             this operation visible to search. If `wait_for`, it waits for a refresh to
             make this operation visible to search. If `false`, it does nothing with refreshes.
+        :param require_alias: If `true`, the destination must be an index alias.
+        :param require_data_stream: If `true`, the request's actions must target a data
+            stream (existing or to be created).
         :param routing: A custom value that is used to route operations to a specific
             shard.
         :param timeout: The period the request waits for the following operations: automatic
@@ -1246,12 +1177,18 @@ class AsyncElasticsearch(BaseClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if pipeline is not None:
             __query["pipeline"] = pipeline
         if pretty is not None:
             __query["pretty"] = pretty
         if refresh is not None:
             __query["refresh"] = refresh
+        if require_alias is not None:
+            __query["require_alias"] = require_alias
+        if require_data_stream is not None:
+            __query["require_data_stream"] = require_data_stream
         if routing is not None:
             __query["routing"] = routing
         if timeout is not None:
@@ -1293,7 +1230,7 @@ class AsyncElasticsearch(BaseClient):
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
@@ -1328,7 +1265,7 @@ class AsyncElasticsearch(BaseClient):
           It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-delete.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete>`_
 
         :param index: The name of the target index.
         :param id: A unique identifier for the document.
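
A minimal usage sketch for this delete call (the endpoint URL, index name, and document id below are placeholders, not taken from this changeset):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local endpoint
        try:
            # Delete document "1" from the placeholder index "my-index".
            resp = await client.delete(index="my-index", id="1")
            print(resp["result"])  # "deleted" when the document existed
        finally:
            await client.close()

    asyncio.run(main())
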
@@ -1397,7 +1334,7 @@ class AsyncElasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("max_docs", "query", "slice"),
+        body_fields=("max_docs", "query", "slice", "sort"),
         parameter_aliases={"from": "from_"},
     )
     async def delete_by_query(
@@ -1441,7 +1378,12 @@ class AsyncElasticsearch(BaseClient):
         ] = None,
         slice: t.Optional[t.Mapping[str, t.Any]] = None,
         slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
-        sort: t.Optional[t.Sequence[str]] = None,
+        sort: t.Optional[
+            t.Union[
+                t.Sequence[t.Union[str, t.Mapping[str, t.Any]]],
+                t.Union[str, t.Mapping[str, t.Any]],
+            ]
+        ] = None,
         stats: t.Optional[t.Sequence[str]] = None,
         terminate_after: t.Optional[int] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
@@ -1517,7 +1459,7 @@ class AsyncElasticsearch(BaseClient):
           The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-delete-by-query.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams or indices,
@@ -1533,8 +1475,8 @@ class AsyncElasticsearch(BaseClient):
             used only when the `q` query string parameter is specified.
         :param conflicts: What to do if delete by query hits version conflicts: `abort`
             or `proceed`.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as default where no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -1543,7 +1485,7 @@ class AsyncElasticsearch(BaseClient):
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
             values, such as `open,hidden`.
-        :param from_: Starting offset (default: 0)
+        :param from_: Skips the specified number of documents.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param lenient: If `true`, format-based query failures (such as providing text
@@ -1573,7 +1515,7 @@ class AsyncElasticsearch(BaseClient):
         :param slice: Slice the request manually using the provided slice ID and total
             number of slices.
         :param slices: The number of slices this task should be divided into.
-        :param sort: A comma-separated list of `<field>:<direction>` pairs.
+        :param sort: A sort object that specifies the order of deleted documents.
         :param stats: The specific `tag` of the request for logging and statistical purposes.
         :param terminate_after: The maximum number of documents to collect for each shard.
             If a query reaches this limit, Elasticsearch terminates the query early.
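
Because `sort` is now carried in the request body rather than as a query parameter, a delete_by_query call might look like the following sketch; the index, field names, and cutoff are illustrative, and the client is assumed to be constructed as in the earlier delete example:

    from elasticsearch import AsyncElasticsearch

    async def purge_old_docs(client: AsyncElasticsearch) -> int:
        resp = await client.delete_by_query(
            index="my-index",                         # placeholder index
            query={"range": {"@timestamp": {"lt": "now-30d"}}},
            sort=[{"@timestamp": {"order": "asc"}}],  # sent in the body, not as ?sort=
            max_docs=1000,
            conflicts="proceed",
        )
        return resp["deleted"]
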
@@ -1663,8 +1605,6 @@ class AsyncElasticsearch(BaseClient):
             __query["search_type"] = search_type
         if slices is not None:
             __query["slices"] = slices
-        if sort is not None:
-            __query["sort"] = sort
         if stats is not None:
             __query["stats"] = stats
         if terminate_after is not None:
@@ -1684,6 +1624,8 @@ class AsyncElasticsearch(BaseClient):
                 __body["query"] = query
             if slice is not None:
                 __body["slice"] = slice
+            if sort is not None:
+                __body["sort"] = sort
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -1699,7 +1641,7 @@ class AsyncElasticsearch(BaseClient):
     async def delete_by_query_rethrottle(
         self,
         *,
-        task_id: t.Union[int, str],
+        task_id: str,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -1714,7 +1656,7 @@ class AsyncElasticsearch(BaseClient):
           Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-delete-by-query.html#docs-delete-by-query-rethrottle>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle>`_
 
         :param task_id: The ID for the task.
         :param requests_per_second: The throttle for this request in sub-requests per
@@ -1764,7 +1706,7 @@ class AsyncElasticsearch(BaseClient):
           Deletes a stored script or search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-stored-script-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script>`_
 
         :param id: The identifier for the stored script or search template.
         :param master_timeout: The period to wait for a connection to the master node.
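
A small sketch of the corresponding delete_script call, assuming an already-constructed AsyncElasticsearch client and a placeholder script id:

    from elasticsearch import AsyncElasticsearch

    async def drop_script(client: AsyncElasticsearch) -> bool:
        # "my-stored-script" is a placeholder identifier.
        resp = await client.delete_script(id="my-stored-script", master_timeout="30s")
        return resp["acknowledged"]
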
@@ -1828,7 +1770,7 @@ class AsyncElasticsearch(BaseClient):
         stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> HeadApiResponse:
         """
@@ -1848,7 +1790,7 @@ class AsyncElasticsearch(BaseClient):
           Elasticsearch cleans up deleted documents in the background as you continue to index more data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases. It
             supports wildcards (`*`).
@@ -1957,7 +1899,7 @@ class AsyncElasticsearch(BaseClient):
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> HeadApiResponse:
         """
@@ -1971,7 +1913,7 @@ class AsyncElasticsearch(BaseClient):
           <p>A document's source is not available if it is disabled in the mapping.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases. It
             supports wildcards (`*`).
@@ -2077,7 +2019,7 @@ class AsyncElasticsearch(BaseClient):
           It computes a score explanation for a query and a specific document.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-explain.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain>`_
 
         :param index: Index names that are used to limit the request. Only a single index
             name can be provided to this parameter.
@@ -2086,8 +2028,8 @@ class AsyncElasticsearch(BaseClient):
             This parameter can be used only when the `q` query string parameter is specified.
         :param analyzer: The analyzer to use for the query string. This parameter can
             be used only when the `q` query string parameter is specified.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as default where no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -2198,6 +2140,7 @@ class AsyncElasticsearch(BaseClient):
         include_unmapped: t.Optional[bool] = None,
         index_filter: t.Optional[t.Mapping[str, t.Any]] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         types: t.Optional[t.Sequence[str]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
@@ -2212,7 +2155,7 @@ class AsyncElasticsearch(BaseClient):
           For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the <code>keyword</code> family.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-field-caps.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (*). To target all data streams
@@ -2241,6 +2184,11 @@ class AsyncElasticsearch(BaseClient):
             deleted documents) are outside of the provided range. However, not all queries
             can rewrite to `match_none` so this API may return an index even if the provided
             filter matches no document.
+        :param project_routing: Specifies a subset of projects to target for the field-caps
+            query using project metadata tags in a subset of Lucene query syntax. Allowed
+            Lucene queries: the `_alias` tag and a single value (possibly wildcarded).
+            Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in
+            serverless only.
         :param runtime_mappings: Define ad-hoc runtime fields in the request similar
             to the way it is done in search requests. These fields exist only as part
             of the query and take precedence over fields defined with the same name in
@@ -2278,6 +2226,8 @@ class AsyncElasticsearch(BaseClient):
             __query["include_unmapped"] = include_unmapped
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if types is not None:
             __query["types"] = types
         if not __body:
@@ -2305,6 +2255,7 @@ class AsyncElasticsearch(BaseClient):
     @_rewrite_parameters(
         parameter_aliases={
             "_source": "source",
+            "_source_exclude_vectors": "source_exclude_vectors",
             "_source_excludes": "source_excludes",
             "_source_includes": "source_includes",
         },
@@ -2324,12 +2275,13 @@ class AsyncElasticsearch(BaseClient):
         refresh: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
+        source_exclude_vectors: t.Optional[bool] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2373,12 +2325,12 @@ class AsyncElasticsearch(BaseClient):
           Elasticsearch cleans up deleted documents in the background as you continue to index more data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: The name of the index that contains the document.
         :param id: A unique document identifier.
         :param force_synthetic_source: Indicates whether the request forces synthetic
-            `_source`. Use this paramater to test if the mapping supports synthetic `_source`
+            `_source`. Use this parameter to test if the mapping supports synthetic `_source`
             and to get a sense of the worst case performance. Fetches with this parameter
             enabled will be slower than enabling synthetic source natively in the index.
         :param preference: The node or shard the operation should be performed on. By
@@ -2397,6 +2349,7 @@ class AsyncElasticsearch(BaseClient):
         :param routing: A custom value used to route operations to a specific shard.
         :param source: Indicates whether to return the `_source` field (`true` or `false`)
             or lists the fields to return.
+        :param source_exclude_vectors: Whether vectors should be excluded from `_source`.
         :param source_excludes: A comma-separated list of source fields to exclude from
             the response. You can also use this parameter to exclude fields from the
             subset specified in `_source_includes` query parameter. If the `_source`
@@ -2409,8 +2362,8 @@ class AsyncElasticsearch(BaseClient):
         :param stored_fields: A comma-separated list of stored fields to return as part
             of a hit. If no fields are specified, no stored fields are included in the
             response. If this field is specified, the `_source` parameter defaults to
-            `false`. Only leaf fields can be retrieved with the `stored_field` option.
-            Object fields can't be returned;​if specified, the request fails.
+            `false`. Only leaf fields can be retrieved with the `stored_fields` option.
+            Object fields can't be returned; if specified, the request fails.
         :param version: The version number for concurrency control. It must match the
             current version of the document for the request to succeed.
         :param version_type: The version type.
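
A sketch of a get call exercising the new `_source_exclude_vectors` flag; the index and document id are placeholders and the client is assumed to be built as in the earlier example:

    from typing import Any

    from elasticsearch import AsyncElasticsearch

    async def fetch_without_vectors(client: AsyncElasticsearch) -> dict[str, Any]:
        # source_exclude_vectors maps to the ?_source_exclude_vectors query parameter.
        doc = await client.get(index="my-index", id="1", source_exclude_vectors=True)
        return doc["_source"]
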
@@ -2442,6 +2395,8 @@ class AsyncElasticsearch(BaseClient):
             __query["routing"] = routing
         if source is not None:
             __query["_source"] = source
+        if source_exclude_vectors is not None:
+            __query["_source_exclude_vectors"] = source_exclude_vectors
         if source_excludes is not None:
             __query["_source_excludes"] = source_excludes
         if source_includes is not None:
@@ -2480,7 +2435,7 @@ class AsyncElasticsearch(BaseClient):
           Retrieves a stored script or search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-scripting.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script>`_
 
         :param id: The identifier for the stored script or search template.
         :param master_timeout: The period to wait for the master node. If the master
@@ -2529,7 +2484,7 @@ class AsyncElasticsearch(BaseClient):
           <p>Get a list of supported script contexts and their methods.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-script-contexts-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_script_context"
@@ -2568,7 +2523,7 @@ class AsyncElasticsearch(BaseClient):
           <p>Get a list of available script types, languages, and contexts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-script-languages-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_script_language"
@@ -2614,10 +2569,9 @@ class AsyncElasticsearch(BaseClient):
         source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2633,7 +2587,7 @@ class AsyncElasticsearch(BaseClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: The name of the index that contains the document.
         :param id: A unique document identifier.
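
A minimal sketch of the source-only fetch described above, with placeholder names and an assumed pre-built client:

    from typing import Any

    from elasticsearch import AsyncElasticsearch

    async def fetch_source_only(client: AsyncElasticsearch) -> Any:
        # Returns only the _source body, without the usual metadata wrapper.
        return await client.get_source(index="my-index", id="1")
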
@@ -2651,8 +2605,6 @@ class AsyncElasticsearch(BaseClient):
             the response.
         :param source_includes: A comma-separated list of source fields to include in
             the response.
-        :param stored_fields: A comma-separated list of stored fields to return as part
-            of a hit.
         :param version: The version number for concurrency control. It must match the
             current version of the document for the request to succeed.
         :param version_type: The version type.
@@ -2686,8 +2638,6 @@ class AsyncElasticsearch(BaseClient):
             __query["_source_excludes"] = source_excludes
         if source_includes is not None:
             __query["_source_includes"] = source_includes
-        if stored_fields is not None:
-            __query["stored_fields"] = stored_fields
         if version is not None:
             __query["version"] = version
         if version_type is not None:
@@ -2733,7 +2683,7 @@ class AsyncElasticsearch(BaseClient):
           When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/health-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report>`_
 
         :param feature: A feature of the cluster, as returned by the top-level health
             report API.
@@ -2788,6 +2738,7 @@ class AsyncElasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         if_primary_term: t.Optional[int] = None,
         if_seq_no: t.Optional[int] = None,
+        include_source_on_error: t.Optional[bool] = None,
         op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
@@ -2795,11 +2746,12 @@ class AsyncElasticsearch(BaseClient):
             t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
         ] = None,
         require_alias: t.Optional[bool] = None,
+        require_data_stream: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
@@ -2844,9 +2796,7 @@ class AsyncElasticsearch(BaseClient):
           This does come at the (very minimal) cost of an additional document parsing pass.
           If the <code>_routing</code> mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.</p>
           <p>NOTE: Data streams do not support custom routing unless they were created with the <code>allow_custom_routing</code> setting enabled in the template.</p>
-          <ul>
-          <li>** Distributed**</li>
-          </ul>
+          <p><strong>Distributed</strong></p>
           <p>The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
           After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.</p>
           <p><strong>Active shards</strong></p>
@@ -2899,7 +2849,7 @@ class AsyncElasticsearch(BaseClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-index_.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create>`_
 
         :param index: The name of the data stream or index to target. If the target doesn't
             exist and matches the name or wildcard (`*`) pattern of an index template
@@ -2915,6 +2865,8 @@ class AsyncElasticsearch(BaseClient):
             term.
         :param if_seq_no: Only perform the operation if the document has this sequence
             number.
+        :param include_source_on_error: If `true`, the document source is included
+            in the error message if there is a parsing error.
         :param op_type: Set to `create` to only index the document if it does not already
             exist (put if absent). If a document with the specified `_id` already exists,
             the indexing operation will fail. The behavior is the same as using the `<index>/_create`
@@ -2930,6 +2882,8 @@ class AsyncElasticsearch(BaseClient):
             this operation visible to search. If `wait_for`, it waits for a refresh to
             make this operation visible to search. If `false`, it does nothing with refreshes.
         :param require_alias: If `true`, the destination must be an index alias.
+        :param require_data_stream: If `true`, the request's actions must target a data
+            stream (existing or to be created).
         :param routing: A custom value that is used to route operations to a specific
             shard.
         :param timeout: The period the request waits for the following operations: automatic
@@ -2979,6 +2933,8 @@ class AsyncElasticsearch(BaseClient):
             __query["if_primary_term"] = if_primary_term
         if if_seq_no is not None:
             __query["if_seq_no"] = if_seq_no
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if op_type is not None:
             __query["op_type"] = op_type
         if pipeline is not None:
@@ -2989,6 +2945,8 @@ class AsyncElasticsearch(BaseClient):
             __query["refresh"] = refresh
         if require_alias is not None:
             __query["require_alias"] = require_alias
+        if require_data_stream is not None:
+            __query["require_data_stream"] = require_data_stream
         if routing is not None:
             __query["routing"] = routing
         if timeout is not None:
@@ -3024,10 +2982,11 @@ class AsyncElasticsearch(BaseClient):
         .. raw:: html
 
           <p>Get cluster info.
-          Get basic build, version, and cluster information.</p>
+          Get basic build, version, and cluster information.
+          In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rest-api-root.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/"
@@ -3051,127 +3010,6 @@ class AsyncElasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=(
-            "knn",
-            "docvalue_fields",
-            "fields",
-            "filter",
-            "source",
-            "stored_fields",
-        ),
-        parameter_aliases={"_source": "source"},
-    )
-    @_stability_warning(Stability.EXPERIMENTAL)
-    async def knn_search(
-        self,
-        *,
-        index: t.Union[str, t.Sequence[str]],
-        knn: t.Optional[t.Mapping[str, t.Any]] = None,
-        docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None,
-        error_trace: t.Optional[bool] = None,
-        fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        filter: t.Optional[
-            t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]]
-        ] = None,
-        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        human: t.Optional[bool] = None,
-        pretty: t.Optional[bool] = None,
-        routing: t.Optional[str] = None,
-        source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None,
-        stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        body: t.Optional[t.Dict[str, t.Any]] = None,
-    ) -> ObjectApiResponse[t.Any]:
-        """
-        .. raw:: html
-
-          <p>Run a knn search.</p>
-          <p>NOTE: The kNN search API has been replaced by the <code>knn</code> option in the search API.</p>
-          <p>Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents.
-          Given a query vector, the API finds the k closest vectors and returns those documents as search hits.</p>
-          <p>Elasticsearch uses the HNSW algorithm to support efficient kNN search.
-          Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed.
-          This means the results returned are not always the true k closest neighbors.</p>
-          <p>The kNN search API supports restricting the search using a filter.
-          The search will return the top k documents that also match the filter query.</p>
-          <p>A kNN search response has the exact same structure as a search API response.
-          However, certain sections have a meaning specific to kNN search:</p>
-          <ul>
-          <li>The document <code>_score</code> is determined by the similarity between the query and document vector.</li>
-          <li>The <code>hits.total</code> object contains the total number of nearest neighbor candidates considered, which is <code>num_candidates * num_shards</code>. The <code>hits.total.relation</code> will always be <code>eq</code>, indicating an exact value.</li>
-          </ul>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/knn-search-api.html>`_
-
-        :param index: A comma-separated list of index names to search; use `_all` or
-            to perform the operation on all indices.
-        :param knn: The kNN query to run.
-        :param docvalue_fields: The request returns doc values for field names matching
-            these patterns in the `hits.fields` property of the response. It accepts
-            wildcard (`*`) patterns.
-        :param fields: The request returns values for field names matching these patterns
-            in the `hits.fields` property of the response. It accepts wildcard (`*`)
-            patterns.
-        :param filter: A query to filter the documents that can match. The kNN search
-            will return the top `k` documents that also match this filter. The value
-            can be a single query or a list of queries. If `filter` isn't provided, all
-            documents are allowed to match.
-        :param routing: A comma-separated list of specific routing values.
-        :param source: Indicates which source fields are returned for matching documents.
-            These fields are returned in the `hits._source` property of the search response.
-        :param stored_fields: A list of stored fields to return as part of a hit. If
-            no fields are specified, no stored fields are included in the response. If
-            this field is specified, the `_source` parameter defaults to `false`. You
-            can pass `_source: true` to return both source fields and stored fields in
-            the search response.
-        """
-        if index in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'index'")
-        if knn is None and body is None:
-            raise ValueError("Empty value passed for parameter 'knn'")
-        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
-        __path = f'/{__path_parts["index"]}/_knn_search'
-        __query: t.Dict[str, t.Any] = {}
-        __body: t.Dict[str, t.Any] = body if body is not None else {}
-        if error_trace is not None:
-            __query["error_trace"] = error_trace
-        if filter_path is not None:
-            __query["filter_path"] = filter_path
-        if human is not None:
-            __query["human"] = human
-        if pretty is not None:
-            __query["pretty"] = pretty
-        if routing is not None:
-            __query["routing"] = routing
-        if not __body:
-            if knn is not None:
-                __body["knn"] = knn
-            if docvalue_fields is not None:
-                __body["docvalue_fields"] = docvalue_fields
-            if fields is not None:
-                __body["fields"] = fields
-            if filter is not None:
-                __body["filter"] = filter
-            if source is not None:
-                __body["_source"] = source
-            if stored_fields is not None:
-                __body["stored_fields"] = stored_fields
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
-        return await self.perform_request(  # type: ignore[return-value]
-            "POST",
-            __path,
-            params=__query,
-            headers=__headers,
-            body=__body,
-            endpoint_id="knn_search",
-            path_parts=__path_parts,
-        )
-
-    @_rewrite_parameters(
         body_fields=("docs", "ids"),
         parameter_aliases={
             "_source": "source",
@@ -3217,7 +3055,7 @@ class AsyncElasticsearch(BaseClient):
           You can include the <code>stored_fields</code> query parameter in the request URI to specify the defaults to use when there are no per-document instructions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-multi-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget>`_
 
         :param index: Name of the index to retrieve documents from when `ids` are specified,
             or when a document in the `docs` array does not specify an index.
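
A brief mget sketch, assuming a pre-built client and placeholder index and document ids:

    from elasticsearch import AsyncElasticsearch

    async def fetch_many(client: AsyncElasticsearch) -> list[str]:
        resp = await client.mget(index="my-index", ids=["1", "2", "3"])
        # Keep only the ids of documents that were actually found.
        return [doc["_id"] for doc in resp["docs"] if doc.get("found")]
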
@@ -3328,6 +3166,7 @@ class AsyncElasticsearch(BaseClient):
         max_concurrent_shard_requests: t.Optional[int] = None,
         pre_filter_shard_size: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         rest_total_hits_as_int: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         search_type: t.Optional[
@@ -3352,7 +3191,7 @@ class AsyncElasticsearch(BaseClient):
           When sending requests to this endpoint the <code>Content-Type</code> header should be set to <code>application/x-ndjson</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-multi-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch>`_
 
         :param searches:
         :param index: Comma-separated list of data streams, indices, and index aliases
@@ -3379,7 +3218,8 @@ class AsyncElasticsearch(BaseClient):
             computationally expensive named queries on a large number of hits may add
             significant overhead.
         :param max_concurrent_searches: Maximum number of concurrent searches the multi
-            search API can execute.
+            search API can execute. Defaults to `max(1, (# of data nodes * min(search
+            thread pool size, 10)))`.
         :param max_concurrent_shard_requests: Maximum number of concurrent shard requests
             that each sub-search request executes per node.
         :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter
@@ -3388,6 +3228,10 @@ class AsyncElasticsearch(BaseClient):
             roundtrip can limit the number of shards significantly if for instance a
             shard can not match any documents based on its rewrite method i.e., if date
             filters are mandatory to match but the shard bounds and the query are disjoint.
+        :param project_routing: Specifies a subset of projects to target for a search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless
+            only.
         :param rest_total_hits_as_int: If true, hits.total are returned as an integer
             in the response. Defaults to false, which returns an object.
         :param routing: Custom routing value used to route search operations to a specific
@@ -3437,6 +3281,8 @@ class AsyncElasticsearch(BaseClient):
             __query["pre_filter_shard_size"] = pre_filter_shard_size
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if rest_total_hits_as_int is not None:
             __query["rest_total_hits_as_int"] = rest_total_hits_as_int
         if routing is not None:
@@ -3475,6 +3321,7 @@ class AsyncElasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         max_concurrent_searches: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         rest_total_hits_as_int: t.Optional[bool] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -3498,7 +3345,7 @@ class AsyncElasticsearch(BaseClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/multi-search-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template>`_
 
         :param search_templates:
         :param index: A comma-separated list of data streams, indices, and aliases to
@@ -3508,6 +3355,10 @@ class AsyncElasticsearch(BaseClient):
             for cross-cluster search requests.
         :param max_concurrent_searches: The maximum number of concurrent searches the
             API can run.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless
+            only.
         :param rest_total_hits_as_int: If `true`, the response returns `hits.total` as
             an integer. If `false`, it returns `hits.total` as an object.
         :param search_type: The type of the search operation.
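
A hedged sketch of an msearch_template call; the template id, params, and index are assumptions, and the commented-out project_routing line only applies on serverless:

    from typing import Any

    from elasticsearch import AsyncElasticsearch

    async def run_templates(client: AsyncElasticsearch) -> list[Any]:
        resp = await client.msearch_template(
            index="my-index",
            search_templates=[
                {},  # header entry: use the defaults for this search
                {"id": "my-search-template", "params": {"query_string": "hello"}},
            ],
            # project_routing="_alias:my-project",  # serverless-only routing hint
        )
        return resp["responses"]
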
@@ -3540,6 +3391,8 @@ class AsyncElasticsearch(BaseClient):
             __query["max_concurrent_searches"] = max_concurrent_searches
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if rest_total_hits_as_int is not None:
             __query["rest_total_hits_as_int"] = rest_total_hits_as_int
         if search_type is not None:
@@ -3585,7 +3438,7 @@ class AsyncElasticsearch(BaseClient):
         term_statistics: t.Optional[bool] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -3603,7 +3456,7 @@ class AsyncElasticsearch(BaseClient):
           The mapping used is determined by the specified <code>_index</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-multi-termvectors.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors>`_
 
         :param index: The name of the index that contains the documents.
         :param docs: An array of existing or artificial documents.
@@ -3707,8 +3560,10 @@ class AsyncElasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         index_filter: t.Optional[t.Mapping[str, t.Any]] = None,
+        max_concurrent_shard_requests: t.Optional[int] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         routing: t.Optional[str] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -3743,7 +3598,7 @@ class AsyncElasticsearch(BaseClient):
           You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/point-in-time-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time>`_
 
         :param index: A comma-separated list of index names to open point in time; use
             `_all` or empty string to perform the operation on all indices
@@ -3756,14 +3611,20 @@ class AsyncElasticsearch(BaseClient):
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
-            values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`,
-            `hidden`, `none`.
+            values, such as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param index_filter: Filter indices if the provided query rewrites to `match_none`
             on every shard.
+        :param max_concurrent_shard_requests: Maximum number of concurrent shard requests
+            that each sub-search request executes per node.
         :param preference: The node or shard the operation should be performed on. By
             default, it is random.
+        :param project_routing: Specifies a subset of projects to target for the PIT
+            request using project metadata tags in a subset of Lucene query syntax. Allowed
+            Lucene queries: the `_alias` tag and a single value (possibly wildcarded).
+            Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in
+            serverless only.
         :param routing: A custom value that is used to route operations to a specific
             shard.
         """
@@ -3789,10 +3650,14 @@ class AsyncElasticsearch(BaseClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
+        if max_concurrent_shard_requests is not None:
+            __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if routing is not None:
             __query["routing"] = routing
         if not __body:
@@ -3837,7 +3702,7 @@ class AsyncElasticsearch(BaseClient):
           Creates or updates a stored script or search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-stored-script-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script>`_
 
         :param id: The identifier for the stored script or search template. It must be
             unique within the cluster.
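
A sketch of storing a script with put_script; the script id, language, and Painless source are assumptions, not taken from this changeset:

    from elasticsearch import AsyncElasticsearch

    async def store_script(client: AsyncElasticsearch) -> bool:
        resp = await client.put_script(
            id="my-stored-script",  # must be unique within the cluster
            script={"lang": "painless", "source": "doc['rating'].value * params.factor"},
        )
        return resp["acknowledged"]
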
@@ -3927,7 +3792,7 @@ class AsyncElasticsearch(BaseClient):
           <p>Evaluate the quality of ranked search results over a set of typical search queries.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-rank-eval.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval>`_
 
         :param requests: A set of typical search requests, together with their provided
             ratings.
@@ -3991,7 +3856,7 @@ class AsyncElasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("dest", "source", "conflicts", "max_docs", "script", "size"),
+        body_fields=("dest", "source", "conflicts", "max_docs", "script"),
     )
     async def reindex(
         self,
@@ -4009,7 +3874,6 @@ class AsyncElasticsearch(BaseClient):
         require_alias: t.Optional[bool] = None,
         script: t.Optional[t.Mapping[str, t.Any]] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        size: t.Optional[int] = None,
         slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         wait_for_active_shards: t.Optional[
@@ -4053,113 +3917,17 @@ class AsyncElasticsearch(BaseClient):
           In this case, the response includes a count of the version conflicts that were encountered.
           Note that the handling of other error types is unaffected by the <code>conflicts</code> property.
           Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than <code>max_docs</code> until it has successfully indexed <code>max_docs</code> documents into the target or it has gone through every document in the source query.</p>
-          <p>NOTE: The reindex API makes no effort to handle ID collisions.
-          The last document written will &quot;win&quot; but the order isn't usually predictable so it is not a good idea to rely on this behavior.
-          Instead, make sure that IDs are unique by using a script.</p>
-          <p><strong>Running reindex asynchronously</strong></p>
-          <p>If the request contains <code>wait_for_completion=false</code>, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.
-          Elasticsearch creates a record of this task as a document at <code>_tasks/&lt;task_id&gt;</code>.</p>
-          <p><strong>Reindex from multiple sources</strong></p>
-          <p>If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.
-          That way you can resume the process if there are any errors by removing the partially completed source and starting over.
-          It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.</p>
-          <p>For example, you can use a bash script like this:</p>
-          <pre><code>for index in i1 i2 i3 i4 i5; do
-            curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
-              &quot;source&quot;: {
-                &quot;index&quot;: &quot;'$index'&quot;
-              },
-              &quot;dest&quot;: {
-                &quot;index&quot;: &quot;'$index'-reindexed&quot;
-              }
-            }'
-          done
-          </code></pre>
-          <p>** Throttling**</p>
-          <p>Set <code>requests_per_second</code> to any positive decimal number (<code>1.4</code>, <code>6</code>, <code>1000</code>, for example) to throttle the rate at which reindex issues batches of index operations.
-          Requests are throttled by padding each batch with a wait time.
-          To turn off throttling, set <code>requests_per_second</code> to <code>-1</code>.</p>
-          <p>The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.
-          The padding time is the difference between the batch size divided by the <code>requests_per_second</code> and the time spent writing.
-          By default the batch size is <code>1000</code>, so if <code>requests_per_second</code> is set to <code>500</code>:</p>
-          <pre><code>target_time = 1000 / 500 per second = 2 seconds
-          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
-          </code></pre>
-          <p>Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.
-          This is &quot;bursty&quot; instead of &quot;smooth&quot;.</p>
-          <p><strong>Slicing</strong></p>
-          <p>Reindex supports sliced scroll to parallelize the reindexing process.
-          This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.</p>
-          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
-          <p>You can slice a reindex request manually by providing a slice ID and total number of slices to each request.
-          You can also let reindex automatically parallelize by using sliced scroll to slice on <code>_id</code>.
-          The <code>slices</code> parameter specifies the number of slices to use.</p>
-          <p>Adding <code>slices</code> to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:</p>
-          <ul>
-          <li>You can see these requests in the tasks API. These sub-requests are &quot;child&quot; tasks of the task for the request with slices.</li>
-          <li>Fetching the status of the task for the request with <code>slices</code> only contains the status of completed slices.</li>
-          <li>These sub-requests are individually addressable for things like cancellation and rethrottling.</li>
-          <li>Rethrottling the request with <code>slices</code> will rethrottle the unfinished sub-request proportionally.</li>
-          <li>Canceling the request with <code>slices</code> will cancel each sub-request.</li>
-          <li>Due to the nature of <code>slices</code>, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.</li>
-          <li>Parameters like <code>requests_per_second</code> and <code>max_docs</code> on a request with <code>slices</code> are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using <code>max_docs</code> with <code>slices</code> might not result in exactly <code>max_docs</code> documents being reindexed.</li>
-          <li>Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.</li>
-          </ul>
-          <p>If slicing automatically, setting <code>slices</code> to <code>auto</code> will choose a reasonable number for most indices.
-          If slicing manually or otherwise tuning automatic slicing, use the following guidelines.</p>
-          <p>Query performance is most efficient when the number of slices is equal to the number of shards in the index.
-          If that number is large (for example, <code>500</code>), choose a lower number as too many slices will hurt performance.
-          Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</p>
-          <p>Indexing performance scales linearly across available resources with the number of slices.</p>
-          <p>Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
-          <p><strong>Modify documents during reindexing</strong></p>
-          <p>Like <code>_update_by_query</code>, reindex operations support a script that modifies the document.
-          Unlike <code>_update_by_query</code>, the script is allowed to modify the document's metadata.</p>
-          <p>Just as in <code>_update_by_query</code>, you can set <code>ctx.op</code> to change the operation that is run on the destination.
-          For example, set <code>ctx.op</code> to <code>noop</code> if your script decides that the document doesn’t have to be indexed in the destination. This &quot;no operation&quot; will be reported in the <code>noop</code> counter in the response body.
-          Set <code>ctx.op</code> to <code>delete</code> if your script decides that the document must be deleted from the destination.
-          The deletion will be reported in the <code>deleted</code> counter in the response body.
-          Setting <code>ctx.op</code> to anything else will return an error, as will setting any other field in <code>ctx</code>.</p>
-          <p>Think of the possibilities! Just be careful; you are able to change:</p>
+          <p>It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.</p>
           <ul>
-          <li><code>_id</code></li>
-          <li><code>_index</code></li>
-          <li><code>_version</code></li>
-          <li><code>_routing</code></li>
+          <li>When requested with <code>wait_for_completion=true</code> (default), the request fails if the node shuts down.</li>
+          <li>When requested with <code>wait_for_completion=false</code>, a task id is returned, for use with the task management APIs. The task may disappear or fail if the node shuts down.
+          When retrying a failed reindex operation, it might be necessary to set <code>conflicts=proceed</code> or to first delete the partial destination index.
+          Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.</li>
           </ul>
-          <p>Setting <code>_version</code> to <code>null</code> or clearing it from the <code>ctx</code> map is just like not sending the version in an indexing request.
-          It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.</p>
-          <p><strong>Reindex from remote</strong></p>
-          <p>Reindex supports reindexing from a remote Elasticsearch cluster.
-          The <code>host</code> parameter must contain a scheme, host, port, and optional path.
-          The <code>username</code> and <code>password</code> parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.
-          Be sure to use HTTPS when using basic authentication or the password will be sent in plain text.
-          There are a range of settings available to configure the behavior of the HTTPS connection.</p>
-          <p>When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
-          Remote hosts must be explicitly allowed with the <code>reindex.remote.whitelist</code> setting.
-          It can be set to a comma delimited list of allowed remote host and port combinations.
-          Scheme is ignored; only the host and port are used.
-          For example:</p>
-          <pre><code>reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*&quot;]
-          </code></pre>
-          <p>The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
-          This feature should work with remote clusters of any version of Elasticsearch.
-          This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.</p>
-          <p>WARNING: Elasticsearch does not support forward compatibility across major versions.
-          For example, you cannot reindex from a 7.x cluster into a 6.x cluster.</p>
-          <p>To enable queries sent to older versions of Elasticsearch, the <code>query</code> parameter is sent directly to the remote host without validation or modification.</p>
-          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
-          <p>Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
-          If the remote index includes very large documents you'll need to use a smaller batch size.
-          It is also possible to set the socket read timeout on the remote connection with the <code>socket_timeout</code> field and the connection timeout with the <code>connect_timeout</code> field.
-          Both default to 30 seconds.</p>
-          <p><strong>Configuring SSL parameters</strong></p>
-          <p>Reindex from remote supports configurable SSL settings.
-          These must be specified in the <code>elasticsearch.yml</code> file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
-          It is not possible to configure SSL in the body of the reindex request.</p>
+          <p>Refer to the linked documentation for examples of how to reindex documents.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-reindex.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex>`_
 
         :param dest: The destination you are copying to.
         :param source: The source you are copying from.
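The failure-handling notes above recommend running a large reindex as a task and, on retry, tolerating conflicts or recreating the destination. A minimal sketch using the async client this hunk documents; the host, API key, and index names are placeholder assumptions, not part of this diff:

```python
# Hedged sketch: asynchronous reindex plus task polling, per the notes above.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        # Fire-and-forget reindex: a task id is returned instead of waiting.
        resp = await client.reindex(
            source={"index": "old-index"},
            dest={"index": "new-index"},
            conflicts="proceed",  # tolerate version conflicts when retrying
            wait_for_completion=False,
        )
        task_id = resp["task"]
        # Check progress through the task management API.
        status = await client.tasks.get(task_id=task_id)
        print(status["completed"], status["task"]["status"])
    finally:
        await client.close()


asyncio.run(main())
```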
@@ -4181,7 +3949,6 @@ class AsyncElasticsearch(BaseClient):
             reindexing.
         :param scroll: The period of time that a consistent view of the index should
             be maintained for scrolled search.
-        :param size:
         :param slices: The number of slices this task should be divided into. It defaults
             to one slice, which means the task isn't sliced into subtasks. Reindex supports
             sliced scroll to parallelize the reindexing process. This parallelization
@@ -4246,8 +4013,6 @@ class AsyncElasticsearch(BaseClient):
                 __body["max_docs"] = max_docs
             if script is not None:
                 __body["script"] = script
-            if size is not None:
-                __body["size"] = size
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -4283,7 +4048,7 @@ class AsyncElasticsearch(BaseClient):
           This behavior prevents scroll timeouts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-reindex.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex>`_
 
         :param task_id: The task identifier, which can be found by using the tasks API.
         :param requests_per_second: The throttle for this request in sub-requests per
@@ -4329,7 +4094,7 @@ class AsyncElasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         params: t.Optional[t.Mapping[str, t.Any]] = None,
         pretty: t.Optional[bool] = None,
-        source: t.Optional[str] = None,
+        source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -4339,7 +4104,7 @@ class AsyncElasticsearch(BaseClient):
           <p>Render a search template as a search request body.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/render-search-template-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template>`_
 
         :param id: The ID of the search template to render. If no `source` is specified,
             this or the `id` request body parameter is required.
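As a rough illustration of rendering a stored template into a concrete request body, a sketch with the async client; the template id and its parameters are assumptions:

```python
# Hedged sketch: render a stored search template without executing it.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        rendered = await client.render_search_template(
            id="my-search-template",
            params={"query_string": "hello world", "from": 0, "size": 10},
        )
        # The response contains the fully substituted search request body.
        print(rendered["template_output"])
    finally:
        await client.close()


asyncio.run(main())
```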
@@ -4374,11 +4139,7 @@ class AsyncElasticsearch(BaseClient):
                 __body["params"] = params
             if source is not None:
                 __body["source"] = source
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -4396,7 +4157,24 @@ class AsyncElasticsearch(BaseClient):
     async def scripts_painless_execute(
         self,
         *,
-        context: t.Optional[str] = None,
+        context: t.Optional[
+            t.Union[
+                str,
+                t.Literal[
+                    "boolean_field",
+                    "composite_field",
+                    "date_field",
+                    "double_field",
+                    "filter",
+                    "geo_point_field",
+                    "ip_field",
+                    "keyword_field",
+                    "long_field",
+                    "painless_test",
+                    "score",
+                ],
+            ]
+        ] = None,
         context_setup: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -4408,15 +4186,22 @@ class AsyncElasticsearch(BaseClient):
         """
         .. raw:: html
 
-          <p>Run a script.
-          Runs a script and returns a result.</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/painless/8.17/painless-execute-api.html>`_
-
-        :param context: The context that the script should run in.
-        :param context_setup: Additional parameters for the `context`.
-        :param script: The Painless script to execute.
+          <p>Run a script.</p>
+          <p>Runs a script and returns a result.
+          Use this API to build and test scripts, such as when defining a script for a runtime field.
+          This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.</p>
+          <p>The API uses several <em>contexts</em>, which control how scripts are run, what variables are available at runtime, and what the return type is.</p>
+          <p>Each context requires a script, but additional parameters depend on the context you're using for that script.</p>
+
+
+        `<https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples>`_
+
+        :param context: The context that the script should run in. NOTE: Result ordering
+            in the field contexts is not guaranteed.
+        :param context_setup: Additional parameters for the `context`. NOTE: This parameter
+            is required for all contexts except `painless_test`, which is the default
+            if no value is provided for `context`.
+        :param script: The Painless script to run.
         """
         __path_parts: t.Dict[str, str] = {}
        __path = "/_scripts/painless/_execute"
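The context behavior described in the docstring above (a `context_setup` is required for every context except the default `painless_test`) can be exercised as follows; the index name, mapping, and sample document are assumptions for this sketch:

```python
# Hedged sketch: run a Painless script in the "filter" context against a sample document.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.scripts_painless_execute(
            script={
                "source": "doc['field'].value.length() <= params.max_length",
                "params": {"max_length": 4},
            },
            context="filter",
            context_setup={
                "index": "my-index-000001",   # assumed to exist with `field` mapped as keyword
                "document": {"field": "four"},
            },
        )
        print(resp["result"])  # True if the sample document matches the filter
    finally:
        await client.close()


asyncio.run(main())
```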
@@ -4437,11 +4222,7 @@ class AsyncElasticsearch(BaseClient):
                 __body["context_setup"] = context_setup
             if script is not None:
                 __body["script"] = script
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -4482,7 +4263,7 @@ class AsyncElasticsearch(BaseClient):
           <p>IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/scroll-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll>`_
 
         :param scroll_id: The scroll ID of the search.
         :param rest_total_hits_as_int: If true, the API response’s hit.total property
@@ -4566,6 +4347,7 @@ class AsyncElasticsearch(BaseClient):
         ),
         parameter_aliases={
             "_source": "source",
+            "_source_exclude_vectors": "source_exclude_vectors",
             "_source_excludes": "source_excludes",
             "_source_includes": "source_includes",
             "from": "from_",
@@ -4613,7 +4395,6 @@ class AsyncElasticsearch(BaseClient):
         ] = None,
         lenient: t.Optional[bool] = None,
         max_concurrent_shard_requests: t.Optional[int] = None,
-        min_compatible_shard_node: t.Optional[str] = None,
         min_score: t.Optional[float] = None,
         pit: t.Optional[t.Mapping[str, t.Any]] = None,
         post_filter: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -4621,6 +4402,7 @@ class AsyncElasticsearch(BaseClient):
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         q: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         rank: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -4635,7 +4417,7 @@ class AsyncElasticsearch(BaseClient):
         script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -4650,6 +4432,7 @@ class AsyncElasticsearch(BaseClient):
             ]
         ] = None,
         source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None,
+        source_exclude_vectors: t.Optional[bool] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         stats: t.Optional[t.Sequence[str]] = None,
@@ -4688,7 +4471,7 @@ class AsyncElasticsearch(BaseClient):
           This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams and indices,
@@ -4717,8 +4500,8 @@ class AsyncElasticsearch(BaseClient):
             node and the remote clusters are minimized when running cross-cluster search
             (CCS) requests.
         :param collapse: Collapses search results the values of the specified field.
-        :param default_operator: The default operator for the query string query: `AND`
-            or `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for the query string query: `and`
+            or `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as a default when no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -4766,10 +4549,9 @@ class AsyncElasticsearch(BaseClient):
             per node that the search runs concurrently. This value should be used to
             limit the impact of the search on the cluster in order to limit the number
             of concurrent shard requests.
-        :param min_compatible_shard_node: The minimum version of the node that can handle
-            the request. Any handling node with a lower version will fail the request.
         :param min_score: The minimum `_score` for matching documents. Documents with
-            a lower `_score` are not included in the search results.
+            a lower `_score` are not included in search results and results collected
+            by aggregations.
         :param pit: Limit the search to a point in time (PIT). If you provide a PIT,
             you cannot specify an `<index>` in the request path.
         :param post_filter: Use the `post_filter` parameter to filter search results.
@@ -4788,21 +4570,26 @@ class AsyncElasticsearch(BaseClient):
         :param preference: The nodes and shards used for the search. By default, Elasticsearch
             selects from eligible nodes and shards using adaptive replica selection,
             accounting for allocation awareness. Valid values are: * `_only_local` to
-            run the search only on shards on the local node; * `_local` to, if possible,
+            run the search only on shards on the local node. * `_local` to, if possible,
             run the search on shards on the local node, or if not, select shards using
-            the default method; * `_only_nodes:<node-id>,<node-id>` to run the search
-            on only the specified nodes IDs, where, if suitable shards exist on more
-            than one selected node, use shards on those nodes using the default method,
-            or if none of the specified nodes are available, select shards from any available
-            node using the default method; * `_prefer_nodes:<node-id>,<node-id>` to if
-            possible, run the search on the specified nodes IDs, or if not, select shards
-            using the default method; * `_shards:<shard>,<shard>` to run the search only
-            on the specified shards; * `<custom-string>` (any string that does not start
-            with `_`) to route searches with the same `<custom-string>` to the same shards
-            in the same order.
+            the default method. * `_only_nodes:<node-id>,<node-id>` to run the search
+            only on the specified node IDs. If suitable shards exist on more than one
+            selected node, use shards on those nodes using the default method. If none
+            of the specified nodes are available, select shards from any available node
+            using the default method. * `_prefer_nodes:<node-id>,<node-id>` to run the
+            search, if possible, on the specified node IDs. If not, select shards using the
+            default method. * `_shards:<shard>,<shard>` to run the search only on the
+            specified shards. You can combine this value with other `preference` values.
+            However, the `_shards` value must come first. For example: `_shards:2,3|_local`.
+            * `<custom-string>` (any string that does not start with `_`) to route searches
+            with the same `<custom-string>` to the same shards in the same order.
         :param profile: Set to `true` to return detailed timing information about the
             execution of individual components in a search request. NOTE: This is a debugging
             tool and adds significant overhead to search execution.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param q: A query in the Lucene query string syntax. Query parameter searches
             do not support the full Elasticsearch Query DSL but are handy for testing.
             IMPORTANT: This parameter overrides the query parameter in the request body.
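The `preference` values enumerated above can be combined, with `_shards` first. A small sketch of pinning a search to specific shards on the local node; the index name and query are assumptions:

```python
# Hedged sketch: search with a combined `preference` value.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.search(
            index="my-index",
            preference="_shards:2,3|_local",  # `_shards` must come first when combined
            query={"match": {"user.id": "kimchy"}},
            size=5,
        )
        print(resp["hits"]["total"])
    finally:
        await client.close()


asyncio.run(main())
```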
@@ -4844,6 +4631,7 @@ class AsyncElasticsearch(BaseClient):
             fields are returned in the `hits._source` property of the search response.
             If the `stored_fields` property is specified, the `_source` property defaults
             to `false`. Otherwise, it defaults to `true`.
+        :param source_exclude_vectors: Whether vectors should be excluded from `_source`.
         :param source_excludes: A comma-separated list of source fields to exclude from
             the response. You can also use this parameter to exclude fields from the
             subset specified in `_source_includes` query parameter. If the `_source`
@@ -4950,14 +4738,14 @@ class AsyncElasticsearch(BaseClient):
             __query["lenient"] = lenient
         if max_concurrent_shard_requests is not None:
             __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
-        if min_compatible_shard_node is not None:
-            __query["min_compatible_shard_node"] = min_compatible_shard_node
         if pre_filter_shard_size is not None:
             __query["pre_filter_shard_size"] = pre_filter_shard_size
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if q is not None:
             __query["q"] = q
         if request_cache is not None:
@@ -4970,6 +4758,8 @@ class AsyncElasticsearch(BaseClient):
             __query["scroll"] = scroll
         if search_type is not None:
             __query["search_type"] = search_type
+        if source_exclude_vectors is not None:
+            __query["_source_exclude_vectors"] = source_exclude_vectors
         if source_excludes is not None:
             __query["_source_excludes"] = source_excludes
         if source_includes is not None:
@@ -5110,6 +4900,7 @@ class AsyncElasticsearch(BaseClient):
         ] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         size: t.Optional[int] = None,
@@ -5137,51 +4928,6 @@ class AsyncElasticsearch(BaseClient):
           <li>Optionally, a <code>geo_bounds</code> aggregation on the <code>&lt;field&gt;</code>. The search only includes this aggregation if the <code>exact_bounds</code> parameter is <code>true</code>.</li>
           <li>If the optional parameter <code>with_labels</code> is <code>true</code>, the internal search will include a dynamic runtime field that calls the <code>getLabelPosition</code> function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.</li>
           </ul>
-          <p>For example, Elasticsearch may translate a vector tile search API request with a <code>grid_agg</code> argument of <code>geotile</code> and an <code>exact_bounds</code> argument of <code>true</code> into the following search</p>
-          <pre><code>GET my-index/_search
-          {
-            &quot;size&quot;: 10000,
-            &quot;query&quot;: {
-              &quot;geo_bounding_box&quot;: {
-                &quot;my-geo-field&quot;: {
-                  &quot;top_left&quot;: {
-                    &quot;lat&quot;: -40.979898069620134,
-                    &quot;lon&quot;: -45
-                  },
-                  &quot;bottom_right&quot;: {
-                    &quot;lat&quot;: -66.51326044311186,
-                    &quot;lon&quot;: 0
-                  }
-                }
-              }
-            },
-            &quot;aggregations&quot;: {
-              &quot;grid&quot;: {
-                &quot;geotile_grid&quot;: {
-                  &quot;field&quot;: &quot;my-geo-field&quot;,
-                  &quot;precision&quot;: 11,
-                  &quot;size&quot;: 65536,
-                  &quot;bounds&quot;: {
-                    &quot;top_left&quot;: {
-                      &quot;lat&quot;: -40.979898069620134,
-                      &quot;lon&quot;: -45
-                    },
-                    &quot;bottom_right&quot;: {
-                      &quot;lat&quot;: -66.51326044311186,
-                      &quot;lon&quot;: 0
-                    }
-                  }
-                }
-              },
-              &quot;bounds&quot;: {
-                &quot;geo_bounds&quot;: {
-                  &quot;field&quot;: &quot;my-geo-field&quot;,
-                  &quot;wrap_longitude&quot;: false
-                }
-              }
-            }
-          }
-          </code></pre>
           <p>The API returns results as a binary Mapbox vector tile.
           Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:</p>
           <ul>
@@ -5436,9 +5182,10 @@ class AsyncElasticsearch(BaseClient):
           Some cells may intersect more than one vector tile.
           To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
           Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.</p>
+          <p>Learn how to use the vector tile search API with practical examples in the <a href="https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search">Vector tile search examples</a> guide.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-vector-tile-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt>`_
 
         :param index: Comma-separated list of data streams, indices, or aliases to search
         :param field: Field containing geospatial data to return
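To make the binary response described above concrete, a sketch of fetching a single vector tile; the index, geo field, and tile coordinates are assumptions, and the response body is a Mapbox vector tile (PBF), not JSON:

```python
# Hedged sketch: request one zoom/x/y vector tile and write the raw protobuf bytes to disk.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        tile = await client.search_mvt(
            index="museums",
            field="location",
            zoom=13,
            x=4207,
            y=2692,
            grid_precision=2,
            fields=["name", "price"],
        )
        with open("tile_13_4207_2692.pbf", "wb") as f:
            f.write(tile.body)  # raw PBF bytes
    finally:
        await client.close()


asyncio.run(main())
```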
@@ -5471,6 +5218,10 @@ class AsyncElasticsearch(BaseClient):
             In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`,
             each feature is a polygon of the cell's bounding box. If `point`, each feature
             is a Point that is the centroid of the cell.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param query: The query DSL used to filter documents for the search.
         :param runtime_mappings: Defines one or more runtime fields in the search request.
             These fields take precedence over mapped fields with the same name.
@@ -5534,6 +5285,8 @@ class AsyncElasticsearch(BaseClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if not __body:
             if aggs is not None:
                 __body["aggs"] = aggs
@@ -5597,6 +5350,7 @@ class AsyncElasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
@@ -5611,7 +5365,7 @@ class AsyncElasticsearch(BaseClient):
           <p>If the Elasticsearch security features are enabled, you must have the <code>view_index_metadata</code> or <code>manage</code> index privilege for the target data stream, index, or alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-shards.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams and indices,
@@ -5624,11 +5378,15 @@ class AsyncElasticsearch(BaseClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param local: If `true`, the request retrieves information from the local node
             only.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         :param preference: The node or shard the operation should be performed on. It
             is random by default.
         :param routing: A custom value used to route operations to a specific shard.
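A brief sketch of inspecting which shards and nodes a search would hit, for example to debug the routing value documented above; the index name and routing value are assumptions:

```python
# Hedged sketch: list the shard copies a routed search would target.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.search_shards(index="my-index", routing="user-1")
        for group in resp["shards"]:
            print([(s["index"], s["shard"], s["node"]) for s in group])
    finally:
        await client.close()


asyncio.run(main())
```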
@@ -5655,6 +5413,8 @@ class AsyncElasticsearch(BaseClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if local is not None:
             __query["local"] = local
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
@@ -5700,13 +5460,14 @@ class AsyncElasticsearch(BaseClient):
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         rest_total_hits_as_int: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
         ] = None,
-        source: t.Optional[str] = None,
+        source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None,
         typed_keys: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -5716,7 +5477,7 @@ class AsyncElasticsearch(BaseClient):
           <p>Run a search with a search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-template-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`).
@@ -5730,8 +5491,7 @@ class AsyncElasticsearch(BaseClient):
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. Supports comma-separated
-            values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`,
-            `hidden`, `none`.
+            values, such as `open,hidden`.
         :param explain: If `true`, returns detailed information about score calculation
             as part of each hit. If you specify both this and the `explain` query parameter,
             the API uses only the query parameter.
@@ -5746,6 +5506,10 @@ class AsyncElasticsearch(BaseClient):
         :param preference: The node or shard the operation should be performed on. It
             is random by default.
         :param profile: If `true`, the query execution is profiled.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param rest_total_hits_as_int: If `true`, `hits.total` is rendered as an integer
             in the response. If `false`, it is rendered as an object.
         :param routing: A custom value used to route operations to a specific shard.
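A small sketch of running a search through a stored template with the parameters described above; the template id, index name, and parameter values are assumptions:

```python
# Hedged sketch: execute a stored search template.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.search_template(
            index="my-index",
            id="my-search-template",
            params={"query_string": "hello world", "from": 0, "size": 10},
        )
        print(resp["hits"]["total"])
    finally:
        await client.close()


asyncio.run(main())
```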
@@ -5787,6 +5551,8 @@ class AsyncElasticsearch(BaseClient):
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if rest_total_hits_as_int is not None:
             __query["rest_total_hits_as_int"] = rest_total_hits_as_int
         if routing is not None:
@@ -5859,7 +5625,7 @@ class AsyncElasticsearch(BaseClient):
           </blockquote>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-terms-enum.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum>`_
 
         :param index: A comma-separated list of data streams, indices, and index aliases
             to search. Wildcard (`*`) expressions are supported. To search all data streams
@@ -5913,11 +5679,7 @@ class AsyncElasticsearch(BaseClient):
                 __body["string"] = string
             if timeout is not None:
                 __body["timeout"] = timeout
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -5929,7 +5691,20 @@ class AsyncElasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("doc", "filter", "per_field_analyzer"),
+        body_fields=(
+            "doc",
+            "field_statistics",
+            "fields",
+            "filter",
+            "offsets",
+            "payloads",
+            "per_field_analyzer",
+            "positions",
+            "routing",
+            "term_statistics",
+            "version",
+            "version_type",
+        ),
     )
     async def termvectors(
         self,
@@ -5939,7 +5714,7 @@ class AsyncElasticsearch(BaseClient):
         doc: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         field_statistics: t.Optional[bool] = None,
-        fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        fields: t.Optional[t.Sequence[str]] = None,
         filter: t.Optional[t.Mapping[str, t.Any]] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -5954,7 +5729,7 @@ class AsyncElasticsearch(BaseClient):
         term_statistics: t.Optional[bool] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -5992,10 +5767,11 @@ class AsyncElasticsearch(BaseClient):
           The information is only retrieved for the shard the requested document resides in.
           The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.
           By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
-          Use <code>routing</code> only to hit a particular shard.</p>
+          Use <code>routing</code> only to hit a particular shard.
+          Refer to the linked documentation for detailed examples of how to use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-termvectors.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors>`_
 
         :param index: The name of the index that contains the document.
         :param id: A unique identifier for the document.
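Since this hunk moves the statistics flags from query-string parameters into the request body, a sketch of requesting term vectors with those flags set; the index, document id, and field name are assumptions:

```python
# Hedged sketch: fetch term vectors with term and field statistics.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.termvectors(
            index="my-index",
            id="1",
            fields=["text"],
            term_statistics=True,
            field_statistics=True,
            positions=True,
            offsets=True,
        )
        print(list(resp["term_vectors"]["text"]["terms"]))
    finally:
        await client.close()


asyncio.run(main())
```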
@@ -6006,9 +5782,9 @@ class AsyncElasticsearch(BaseClient):
             (the sum of document frequencies for all terms in this field). * The sum
             of total term frequencies (the sum of total term frequencies of each term
             in this field).
-        :param fields: A comma-separated list or wildcard expressions of fields to include
-            in the statistics. It is used as the default list unless a specific field
-            list is provided in the `completion_fields` or `fielddata_fields` parameters.
+        :param fields: A list of fields to include in the statistics. It is used as the
+            default list unless a specific field list is provided in the `completion_fields`
+            or `fielddata_fields` parameters.
         :param filter: Filter terms based on their tf-idf scores. This could be useful
             in order to find out a good characteristic vector of a document. This feature
             works in a similar manner to the second phase of the More Like This Query.
@@ -6046,41 +5822,41 @@ class AsyncElasticsearch(BaseClient):
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
-        if field_statistics is not None:
-            __query["field_statistics"] = field_statistics
-        if fields is not None:
-            __query["fields"] = fields
         if filter_path is not None:
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
-        if offsets is not None:
-            __query["offsets"] = offsets
-        if payloads is not None:
-            __query["payloads"] = payloads
-        if positions is not None:
-            __query["positions"] = positions
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
         if realtime is not None:
             __query["realtime"] = realtime
-        if routing is not None:
-            __query["routing"] = routing
-        if term_statistics is not None:
-            __query["term_statistics"] = term_statistics
-        if version is not None:
-            __query["version"] = version
-        if version_type is not None:
-            __query["version_type"] = version_type
         if not __body:
             if doc is not None:
                 __body["doc"] = doc
+            if field_statistics is not None:
+                __body["field_statistics"] = field_statistics
+            if fields is not None:
+                __body["fields"] = fields
             if filter is not None:
                 __body["filter"] = filter
+            if offsets is not None:
+                __body["offsets"] = offsets
+            if payloads is not None:
+                __body["payloads"] = payloads
             if per_field_analyzer is not None:
                 __body["per_field_analyzer"] = per_field_analyzer
+            if positions is not None:
+                __body["positions"] = positions
+            if routing is not None:
+                __body["routing"] = routing
+            if term_statistics is not None:
+                __body["term_statistics"] = term_statistics
+            if version is not None:
+                __body["version"] = version
+            if version_type is not None:
+                __body["version_type"] = version_type
         if not __body:
             __body = None  # type: ignore[assignment]
         __headers = {"accept": "application/json"}
@@ -6125,6 +5901,7 @@ class AsyncElasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         if_primary_term: t.Optional[int] = None,
         if_seq_no: t.Optional[int] = None,
+        include_source_on_error: t.Optional[bool] = None,
         lang: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         refresh: t.Optional[
@@ -6162,10 +5939,11 @@ class AsyncElasticsearch(BaseClient):
           </ul>
           <p>The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.</p>
           <p>The <code>_source</code> field must be enabled to use this API.
-          In addition to <code>_source</code>, you can access the following variables through the <code>ctx</code> map: <code>_index</code>, <code>_type</code>, <code>_id</code>, <code>_version</code>, <code>_routing</code>, and <code>_now</code> (the current timestamp).</p>
+          In addition to <code>_source</code>, you can access the following variables through the <code>ctx</code> map: <code>_index</code>, <code>_type</code>, <code>_id</code>, <code>_version</code>, <code>_routing</code>, and <code>_now</code> (the current timestamp).
+          For usage examples such as partial updates, upserts, and scripted updates, see the External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-update.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update>`_
 
         :param index: The name of the target index. By default, the index is created
             automatically if it doesn't exist.
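The `ctx` variables and upsert behavior described above can be illustrated with a scripted counter update; the index, document id, and counter field are assumptions:

```python
# Hedged sketch: scripted update with an upsert fallback.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.update(
            index="my-index",
            id="1",
            script={
                "source": "ctx._source.counter += params.count",
                "lang": "painless",
                "params": {"count": 4},
            },
            upsert={"counter": 1},  # used if the document does not exist yet
        )
        print(resp["result"])  # "updated", "created", or "noop"
    finally:
        await client.close()


asyncio.run(main())
```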
@@ -6180,6 +5958,8 @@ class AsyncElasticsearch(BaseClient):
             term.
         :param if_seq_no: Only perform the operation if the document has this sequence
             number.
+        :param include_source_on_error: Whether to include the document source in the
+            error message in case of parsing errors.
         :param lang: The script language.
         :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
             this operation visible to search. If 'wait_for', it waits for a refresh to
@@ -6224,6 +6004,8 @@ class AsyncElasticsearch(BaseClient):
             __query["if_primary_term"] = if_primary_term
         if if_seq_no is not None:
             __query["if_seq_no"] = if_seq_no
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if lang is not None:
             __query["lang"] = lang
         if pretty is not None:
@@ -6351,6 +6133,24 @@ class AsyncElasticsearch(BaseClient):
           A bulk update request is performed for each batch of matching documents.
           Any query or update failures cause the update by query request to fail and the failures are shown in the response.
           Any update requests that completed successfully still stick, they are not rolled back.</p>
+          <p><strong>Refreshing shards</strong></p>
+          <p>Specifying the <code>refresh</code> parameter refreshes all shards once the request completes.
+          This is different to the update API's <code>refresh</code> parameter, which causes only the shard
+          that received the request to be refreshed. Unlike the update API, it does not support
+          <code>wait_for</code>.</p>
+          <p><strong>Running update by query asynchronously</strong></p>
+          <p>If the request contains <code>wait_for_completion=false</code>, Elasticsearch
+          performs some preflight checks, launches the request, and returns a
+          <a href="https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks">task</a> you can use to cancel or get the status of the task.
+          Elasticsearch creates a record of this task as a document at <code>.tasks/task/${taskId}</code>.</p>
+          <p><strong>Waiting for active shards</strong></p>
+          <p><code>wait_for_active_shards</code> controls how many copies of a shard must be active
+          before proceeding with the request. See <a href="https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards"><code>wait_for_active_shards</code></a>
+          for details. <code>timeout</code> controls how long each write request waits for unavailable
+          shards to become available. Both work exactly the way they work in the
+          <a href="https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Bulk API</a>. Update by query uses scrolled searches, so you can also
+          specify the <code>scroll</code> parameter to control how long it keeps the search context
+          alive, for example <code>?scroll=10m</code>. The default is 5 minutes.</p>
           <p><strong>Throttling update requests</strong></p>
           <p>To control the rate at which update by query issues batches of update operations, you can set <code>requests_per_second</code> to any positive decimal number.
           This pads each batch with a wait time to throttle the rate.
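Combining the asynchronous execution, throttling, and slicing behavior described above, a minimal sketch; the index, query, and script are assumptions:

```python
# Hedged sketch: throttled, sliced, asynchronous update by query.
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")
    try:
        resp = await client.update_by_query(
            index="my-index",
            query={"term": {"user.id": "kimchy"}},
            script={"source": "ctx._source.count++", "lang": "painless"},
            conflicts="proceed",
            requests_per_second=100,    # throttle the batches of update operations
            slices="auto",              # let Elasticsearch pick the slice count
            wait_for_completion=False,  # return a task id instead of blocking
        )
        print(resp["task"])
    finally:
        await client.close()


asyncio.run(main())
```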
@@ -6385,21 +6185,11 @@ class AsyncElasticsearch(BaseClient):
           <li>Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</li>
           <li>Update performance scales linearly across available resources with the number of slices.</li>
           </ul>
-          <p>Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
-          <p><strong>Update the document source</strong></p>
-          <p>Update by query supports scripts to update the document source.
-          As with the update API, you can set <code>ctx.op</code> to change the operation that is performed.</p>
-          <p>Set <code>ctx.op = &quot;noop&quot;</code> if your script decides that it doesn't have to make any changes.
-          The update by query operation skips updating the document and increments the <code>noop</code> counter.</p>
-          <p>Set <code>ctx.op = &quot;delete&quot;</code> if your script decides that the document should be deleted.
-          The update by query operation deletes the document and increments the <code>deleted</code> counter.</p>
-          <p>Update by query supports only <code>index</code>, <code>noop</code>, and <code>delete</code>.
-          Setting <code>ctx.op</code> to anything else is an error.
-          Setting any other field in <code>ctx</code> is an error.
-          This API enables you to only modify the source of matching documents; you cannot move them.</p>
+          <p>Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
+          Refer to the linked documentation for examples of how to update documents using the <code>_update_by_query</code> API:</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-update-by-query.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams or indices,
@@ -6415,8 +6205,8 @@ class AsyncElasticsearch(BaseClient):
             be used only when the `q` query string parameter is specified.
         :param conflicts: The preferred behavior when update by query hits version conflicts:
             `abort` or `proceed`.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as default where no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -6424,9 +6214,8 @@ class AsyncElasticsearch(BaseClient):
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
-            values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`,
-            `hidden`, `none`.
-        :param from_: Starting offset (default: 0)
+            values, such as `open,hidden`.
+        :param from_: Skips the specified number of documents.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param lenient: If `true`, format-based query failures (such as providing text
@@ -6619,7 +6408,7 @@ class AsyncElasticsearch(BaseClient):
           Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-update-by-query.html#docs-update-by-query-rethrottle>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle>`_
 
         :param task_id: The ID for the task.
         :param requests_per_second: The throttle for this request in sub-requests per
diff -pruN 8.17.2-2/elasticsearch/_async/client/_base.py 9.2.0-1/elasticsearch/_async/client/_base.py
--- 8.17.2-2/elasticsearch/_async/client/_base.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -174,7 +174,7 @@ def create_sniff_callback(
                     "GET",
                     "/_nodes/_all/http",
                     headers={
-                        "accept": "application/vnd.elasticsearch+json; compatible-with=8"
+                        "accept": "application/vnd.elasticsearch+json; compatible-with=9"
                     },
                     request_timeout=(
                         sniff_options.sniff_timeout
@@ -298,7 +298,6 @@ class BaseClient:
         def mimetype_header_to_compat(header: str) -> None:
             # Converts all parts of a Accept/Content-Type headers
             # from application/X -> application/vnd.elasticsearch+X
-            nonlocal request_headers
             mimetype = request_headers.get(header, None)
             if mimetype:
                 request_headers[header] = _COMPAT_MIMETYPE_RE.sub(
diff -pruN 8.17.2-2/elasticsearch/_async/client/async_search.py 9.2.0-1/elasticsearch/_async/client/async_search.py
--- 8.17.2-2/elasticsearch/_async/client/async_search.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/async_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class AsyncSearchClient(NamespacedClient
           If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the <code>cancel_task</code> cluster privilege.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param id: A unique identifier for the async search.
         """
@@ -94,11 +94,11 @@ class AsyncSearchClient(NamespacedClient
           If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param id: A unique identifier for the async search.
-        :param keep_alive: Specifies how long the async search should be available in
-            the cluster. When not specified, the `keep_alive` set with the corresponding
+        :param keep_alive: The length of time that the async search should be available
+            in the cluster. When not specified, the `keep_alive` set with the corresponding
             submit async request will be used. Otherwise, it is possible to override
             the value and extend the validity of the request. When this period expires,
             the search, if still running, is cancelled. If the search is completed, its
@@ -157,13 +157,17 @@ class AsyncSearchClient(NamespacedClient
 
           <p>Get the async search status.</p>
           <p>Get the status of a previously submitted async search request given its identifier, without retrieving search results.
-          If the Elasticsearch security features are enabled, use of this API is restricted to the <code>monitoring_user</code> role.</p>
+          If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:</p>
+          <ul>
+          <li>The user or API key that submitted the original async search request.</li>
+          <li>Users that have the <code>monitor</code> cluster privilege or greater privileges.</li>
+          </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param id: A unique identifier for the async search.
-        :param keep_alive: Specifies how long the async search needs to be available.
+        :param keep_alive: The length of time that the async search needs to be available.
             Ongoing async searches and any saved search results are deleted after this
             period.
         """
@@ -277,13 +281,13 @@ class AsyncSearchClient(NamespacedClient
         ] = None,
         lenient: t.Optional[bool] = None,
         max_concurrent_shard_requests: t.Optional[int] = None,
-        min_compatible_shard_node: t.Optional[str] = None,
         min_score: t.Optional[float] = None,
         pit: t.Optional[t.Mapping[str, t.Any]] = None,
         post_filter: t.Optional[t.Mapping[str, t.Any]] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         q: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         request_cache: t.Optional[bool] = None,
@@ -295,7 +299,7 @@ class AsyncSearchClient(NamespacedClient
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -342,7 +346,7 @@ class AsyncSearchClient(NamespacedClient
           The maximum allowed size for a stored async search response can be set by changing the <code>search.max_async_search_response_size</code> cluster level setting.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param index: A comma-separated list of index names to search; use `_all` or
             empty string to perform the operation on all indices
@@ -397,15 +401,18 @@ class AsyncSearchClient(NamespacedClient
             per node this search executes concurrently. This value should be used to
             limit the impact of the search on the cluster in order to limit the number
             of concurrent shard requests
-        :param min_compatible_shard_node:
         :param min_score: Minimum _score for matching documents. Documents with a lower
-            _score are not included in the search results.
+            _score are not included in search results and results collected by aggregations.
         :param pit: Limits the search to a point in time (PIT). If you provide a PIT,
             you cannot specify an <index> in the request path.
         :param post_filter:
         :param preference: Specify the node or shard the operation should be performed
             on (default: random)
         :param profile:
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param q: Query in the Lucene query string syntax
         :param query: Defines the search definition using the Query DSL.
         :param request_cache: Specify if request cache should be used for this request
@@ -522,12 +529,12 @@ class AsyncSearchClient(NamespacedClient
             __query["lenient"] = lenient
         if max_concurrent_shard_requests is not None:
             __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
-        if min_compatible_shard_node is not None:
-            __query["min_compatible_shard_node"] = min_compatible_shard_node
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if q is not None:
             __query["q"] = q
         if request_cache is not None:
diff -pruN 8.17.2-2/elasticsearch/_async/client/autoscaling.py 9.2.0-1/elasticsearch/_async/client/autoscaling.py
--- 8.17.2-2/elasticsearch/_async/client/autoscaling.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/autoscaling.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class AutoscalingClient(NamespacedClient
           <p>NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-delete-autoscaling-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy>`_
 
         :param name: the name of the autoscaling policy
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -104,7 +104,7 @@ class AutoscalingClient(NamespacedClient
           Do not use this information to make autoscaling decisions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-get-autoscaling-capacity.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity>`_
 
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -151,7 +151,7 @@ class AutoscalingClient(NamespacedClient
           <p>NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-get-autoscaling-capacity.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity>`_
 
         :param name: the name of the autoscaling policy
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -206,7 +206,7 @@ class AutoscalingClient(NamespacedClient
           <p>NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-put-autoscaling-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy>`_
 
         :param name: the name of the autoscaling policy
         :param policy:
diff -pruN 8.17.2-2/elasticsearch/_async/client/cat.py 9.2.0-1/elasticsearch/_async/client/cat.py
--- 8.17.2-2/elasticsearch/_async/client/cat.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/cat.py	2025-10-28 16:50:53.000000000 +0000
@@ -36,6 +36,9 @@ class CatClient(NamespacedClient):
         self,
         *,
         name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         expand_wildcards: t.Optional[
             t.Union[
@@ -47,13 +50,42 @@ class CatClient(NamespacedClient):
         ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "alias",
+                            "filter",
+                            "index",
+                            "is_write_index",
+                            "routing.index",
+                            "routing.search",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "alias",
+                        "filter",
+                        "index",
+                        "is_write_index",
+                        "routing.index",
+                        "routing.search",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -65,23 +97,28 @@ class CatClient(NamespacedClient):
           <p>IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-alias.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases>`_
 
         :param name: A comma-separated list of aliases to retrieve. Supports wildcards
             (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
             values, such as `open,hidden`.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
-        :param local: If `true`, the request computes the list of selected nodes from
-            the local cluster state. If `false` the list of selected nodes are computed
-            from the cluster state of the master node. In both cases the coordinating
-            node will send requests for further information to each selected node.
         :param master_timeout: The period to wait for a connection to the master node.
             If the master node is not available before the timeout expires, the request
             fails and returns an error. To indicate that the request should never time out,
@@ -89,6 +126,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -99,6 +142,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/aliases"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if expand_wildcards is not None:
@@ -113,14 +158,14 @@ class CatClient(NamespacedClient):
             __query["help"] = help
         if human is not None:
             __query["human"] = human
-        if local is not None:
-            __query["local"] = local
         if master_timeout is not None:
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
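A short, hedged sketch of the reworked cat.aliases call; the cluster URL is assumed, and the column names come from the new Literal list above.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    try:
        # h now accepts the typed column literals added in this release; the
        # default text format returns the familiar cat table as a string.
        table = await client.cat.aliases(
            name="*",
            h=["alias", "index", "is_write_index"],
            s="alias:asc",
            v=True,
        )
        print(table)
    finally:
        await client.close()


asyncio.run(main())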
@@ -144,13 +189,57 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "disk.avail",
+                            "disk.indices",
+                            "disk.indices.forecast",
+                            "disk.percent",
+                            "disk.total",
+                            "disk.used",
+                            "host",
+                            "ip",
+                            "node",
+                            "node.role",
+                            "shards",
+                            "shards.undesired",
+                            "write_load.forecast",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "disk.avail",
+                        "disk.indices",
+                        "disk.indices.forecast",
+                        "disk.percent",
+                        "disk.total",
+                        "disk.used",
+                        "host",
+                        "ip",
+                        "node",
+                        "node.role",
+                        "shards",
+                        "shards.undesired",
+                        "write_load.forecast",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -161,14 +250,22 @@ class CatClient(NamespacedClient):
           <p>IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-allocation.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation>`_
 
         :param node_id: A comma-separated list of node identifiers or names used to limit
             the returned information.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -179,6 +276,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -211,6 +314,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
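To illustrate the fixed-unit rendering described in the bytes docstring, a sketch against an assumed local cluster:

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    try:
        # bytes="mb" renders the disk columns as plain integers in mebibytes
        # (powers of 1024) instead of auto-scaled suffixes such as "gb".
        table = await client.cat.allocation(
            bytes="mb",
            h=["node", "disk.used", "disk.avail", "disk.percent", "shards"],
            v=True,
        )
        print(table)
    finally:
        await client.close()


asyncio.run(main())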
@@ -228,16 +333,51 @@ class CatClient(NamespacedClient):
         self,
         *,
         name: t.Optional[str] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "alias_count",
+                            "included_in",
+                            "mapping_count",
+                            "metadata_count",
+                            "name",
+                            "settings_count",
+                            "version",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "alias_count",
+                        "included_in",
+                        "mapping_count",
+                        "metadata_count",
+                        "name",
+                        "settings_count",
+                        "version",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -250,13 +390,22 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use the get component template API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-component-templates.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates>`_
 
         :param name: The name of the component template. It accepts wildcard expressions.
             If it is omitted, all component templates are returned.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -267,6 +416,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -277,6 +432,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/component_templates"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -297,6 +454,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -314,14 +473,26 @@ class CatClient(NamespacedClient):
         self,
         *,
         index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[t.Union[str, t.Literal["count", "epoch", "timestamp"]]],
+                t.Union[str, t.Literal["count", "epoch", "timestamp"]],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -334,19 +505,38 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use the count API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-count.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. It supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, or `_alias:*pr*`. Serverless only.
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -357,6 +547,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/count"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -371,8 +563,12 @@ class CatClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
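A hedged sketch of cat.count with the new parameters; the URL and index name are assumptions, and project_routing is commented out because it is documented as serverless-only.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    try:
        # format="json" returns structured rows instead of the plain-text table.
        resp = await client.cat.count(
            index="my-index",  # hypothetical index
            format="json",
            # project_routing="_alias:_origin",  # serverless-only, see docstring above
        )
        print(resp)
    finally:
        await client.close()


asyncio.run(main())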
@@ -396,11 +592,21 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]]
+                ],
+                t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -412,19 +618,33 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use the nodes stats API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-fielddata.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata>`_
 
         :param fields: Comma-separated list of fields used to limit returned information.
             To retrieve all fields, omit this parameter.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -453,6 +673,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -469,10 +691,58 @@ class CatClient(NamespacedClient):
     async def health(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "active_shards_percent",
+                            "cluster",
+                            "epoch",
+                            "init",
+                            "max_task_wait_time",
+                            "node.data",
+                            "node.total",
+                            "pending_tasks",
+                            "pri",
+                            "relo",
+                            "shards",
+                            "status",
+                            "timestamp",
+                            "unassign",
+                            "unassign.pri",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "active_shards_percent",
+                        "cluster",
+                        "epoch",
+                        "init",
+                        "max_task_wait_time",
+                        "node.data",
+                        "node.total",
+                        "pending_tasks",
+                        "pri",
+                        "relo",
+                        "shards",
+                        "status",
+                        "timestamp",
+                        "unassign",
+                        "unassign.pri",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
@@ -498,23 +768,39 @@ class CatClient(NamespacedClient):
           You also can use the API to track the recovery of a large cluster over a longer period of time.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-health.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param ts: If true, returns `HH:MM:SS` and Unix epoch timestamps.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/health"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
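A sketch of cat.health using the unit controls documented above; the endpoint is an assumption.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    try:
        # time="ms" renders duration columns (e.g. max_task_wait_time) as plain
        # integers in milliseconds; ts=False omits the epoch/timestamp columns.
        table = await client.cat.health(time="ms", ts=False, v=True)
        print(table)
    finally:
        await client.close()


asyncio.run(main())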
@@ -556,7 +842,7 @@ class CatClient(NamespacedClient):
           <p>Get help for the CAT APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat"
@@ -590,8 +876,319 @@ class CatClient(NamespacedClient):
         ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        health: t.Optional[t.Union[str, t.Literal["green", "red", "yellow"]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "bulk.avg_size_in_bytes",
+                            "bulk.avg_time",
+                            "bulk.total_operations",
+                            "bulk.total_size_in_bytes",
+                            "bulk.total_time",
+                            "completion.size",
+                            "creation.date",
+                            "creation.date.string",
+                            "dataset.size",
+                            "dense_vector.value_count",
+                            "docs.count",
+                            "docs.deleted",
+                            "fielddata.evictions",
+                            "fielddata.memory_size",
+                            "flush.total",
+                            "flush.total_time",
+                            "get.current",
+                            "get.exists_time",
+                            "get.exists_total",
+                            "get.missing_time",
+                            "get.missing_total",
+                            "get.time",
+                            "get.total",
+                            "health",
+                            "index",
+                            "indexing.delete_current",
+                            "indexing.delete_time",
+                            "indexing.delete_total",
+                            "indexing.index_current",
+                            "indexing.index_failed",
+                            "indexing.index_failed_due_to_version_conflict",
+                            "indexing.index_time",
+                            "indexing.index_total",
+                            "memory.total",
+                            "merges.current",
+                            "merges.current_docs",
+                            "merges.current_size",
+                            "merges.total",
+                            "merges.total_docs",
+                            "merges.total_size",
+                            "merges.total_time",
+                            "pri",
+                            "pri.bulk.avg_size_in_bytes",
+                            "pri.bulk.avg_time",
+                            "pri.bulk.total_operations",
+                            "pri.bulk.total_size_in_bytes",
+                            "pri.bulk.total_time",
+                            "pri.completion.size",
+                            "pri.dense_vector.value_count",
+                            "pri.fielddata.evictions",
+                            "pri.fielddata.memory_size",
+                            "pri.flush.total",
+                            "pri.flush.total_time",
+                            "pri.get.current",
+                            "pri.get.exists_time",
+                            "pri.get.exists_total",
+                            "pri.get.missing_time",
+                            "pri.get.missing_total",
+                            "pri.get.time",
+                            "pri.get.total",
+                            "pri.indexing.delete_current",
+                            "pri.indexing.delete_time",
+                            "pri.indexing.delete_total",
+                            "pri.indexing.index_current",
+                            "pri.indexing.index_failed",
+                            "pri.indexing.index_failed_due_to_version_conflict",
+                            "pri.indexing.index_time",
+                            "pri.indexing.index_total",
+                            "pri.memory.total",
+                            "pri.merges.current",
+                            "pri.merges.current_docs",
+                            "pri.merges.current_size",
+                            "pri.merges.total",
+                            "pri.merges.total_docs",
+                            "pri.merges.total_size",
+                            "pri.merges.total_time",
+                            "pri.query_cache.evictions",
+                            "pri.query_cache.memory_size",
+                            "pri.refresh.external_time",
+                            "pri.refresh.external_total",
+                            "pri.refresh.listeners",
+                            "pri.refresh.time",
+                            "pri.refresh.total",
+                            "pri.request_cache.evictions",
+                            "pri.request_cache.hit_count",
+                            "pri.request_cache.memory_size",
+                            "pri.request_cache.miss_count",
+                            "pri.search.fetch_current",
+                            "pri.search.fetch_time",
+                            "pri.search.fetch_total",
+                            "pri.search.open_contexts",
+                            "pri.search.query_current",
+                            "pri.search.query_time",
+                            "pri.search.query_total",
+                            "pri.search.scroll_current",
+                            "pri.search.scroll_time",
+                            "pri.search.scroll_total",
+                            "pri.segments.count",
+                            "pri.segments.fixed_bitset_memory",
+                            "pri.segments.index_writer_memory",
+                            "pri.segments.memory",
+                            "pri.segments.version_map_memory",
+                            "pri.sparse_vector.value_count",
+                            "pri.store.size",
+                            "pri.suggest.current",
+                            "pri.suggest.time",
+                            "pri.suggest.total",
+                            "pri.warmer.current",
+                            "pri.warmer.total",
+                            "pri.warmer.total_time",
+                            "query_cache.evictions",
+                            "query_cache.memory_size",
+                            "refresh.external_time",
+                            "refresh.external_total",
+                            "refresh.listeners",
+                            "refresh.time",
+                            "refresh.total",
+                            "rep",
+                            "request_cache.evictions",
+                            "request_cache.hit_count",
+                            "request_cache.memory_size",
+                            "request_cache.miss_count",
+                            "search.fetch_current",
+                            "search.fetch_time",
+                            "search.fetch_total",
+                            "search.open_contexts",
+                            "search.query_current",
+                            "search.query_time",
+                            "search.query_total",
+                            "search.scroll_current",
+                            "search.scroll_time",
+                            "search.scroll_total",
+                            "segments.count",
+                            "segments.fixed_bitset_memory",
+                            "segments.index_writer_memory",
+                            "segments.memory",
+                            "segments.version_map_memory",
+                            "sparse_vector.value_count",
+                            "status",
+                            "store.size",
+                            "suggest.current",
+                            "suggest.time",
+                            "suggest.total",
+                            "uuid",
+                            "warmer.current",
+                            "warmer.total",
+                            "warmer.total_time",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "bulk.avg_size_in_bytes",
+                        "bulk.avg_time",
+                        "bulk.total_operations",
+                        "bulk.total_size_in_bytes",
+                        "bulk.total_time",
+                        "completion.size",
+                        "creation.date",
+                        "creation.date.string",
+                        "dataset.size",
+                        "dense_vector.value_count",
+                        "docs.count",
+                        "docs.deleted",
+                        "fielddata.evictions",
+                        "fielddata.memory_size",
+                        "flush.total",
+                        "flush.total_time",
+                        "get.current",
+                        "get.exists_time",
+                        "get.exists_total",
+                        "get.missing_time",
+                        "get.missing_total",
+                        "get.time",
+                        "get.total",
+                        "health",
+                        "index",
+                        "indexing.delete_current",
+                        "indexing.delete_time",
+                        "indexing.delete_total",
+                        "indexing.index_current",
+                        "indexing.index_failed",
+                        "indexing.index_failed_due_to_version_conflict",
+                        "indexing.index_time",
+                        "indexing.index_total",
+                        "memory.total",
+                        "merges.current",
+                        "merges.current_docs",
+                        "merges.current_size",
+                        "merges.total",
+                        "merges.total_docs",
+                        "merges.total_size",
+                        "merges.total_time",
+                        "pri",
+                        "pri.bulk.avg_size_in_bytes",
+                        "pri.bulk.avg_time",
+                        "pri.bulk.total_operations",
+                        "pri.bulk.total_size_in_bytes",
+                        "pri.bulk.total_time",
+                        "pri.completion.size",
+                        "pri.dense_vector.value_count",
+                        "pri.fielddata.evictions",
+                        "pri.fielddata.memory_size",
+                        "pri.flush.total",
+                        "pri.flush.total_time",
+                        "pri.get.current",
+                        "pri.get.exists_time",
+                        "pri.get.exists_total",
+                        "pri.get.missing_time",
+                        "pri.get.missing_total",
+                        "pri.get.time",
+                        "pri.get.total",
+                        "pri.indexing.delete_current",
+                        "pri.indexing.delete_time",
+                        "pri.indexing.delete_total",
+                        "pri.indexing.index_current",
+                        "pri.indexing.index_failed",
+                        "pri.indexing.index_failed_due_to_version_conflict",
+                        "pri.indexing.index_time",
+                        "pri.indexing.index_total",
+                        "pri.memory.total",
+                        "pri.merges.current",
+                        "pri.merges.current_docs",
+                        "pri.merges.current_size",
+                        "pri.merges.total",
+                        "pri.merges.total_docs",
+                        "pri.merges.total_size",
+                        "pri.merges.total_time",
+                        "pri.query_cache.evictions",
+                        "pri.query_cache.memory_size",
+                        "pri.refresh.external_time",
+                        "pri.refresh.external_total",
+                        "pri.refresh.listeners",
+                        "pri.refresh.time",
+                        "pri.refresh.total",
+                        "pri.request_cache.evictions",
+                        "pri.request_cache.hit_count",
+                        "pri.request_cache.memory_size",
+                        "pri.request_cache.miss_count",
+                        "pri.search.fetch_current",
+                        "pri.search.fetch_time",
+                        "pri.search.fetch_total",
+                        "pri.search.open_contexts",
+                        "pri.search.query_current",
+                        "pri.search.query_time",
+                        "pri.search.query_total",
+                        "pri.search.scroll_current",
+                        "pri.search.scroll_time",
+                        "pri.search.scroll_total",
+                        "pri.segments.count",
+                        "pri.segments.fixed_bitset_memory",
+                        "pri.segments.index_writer_memory",
+                        "pri.segments.memory",
+                        "pri.segments.version_map_memory",
+                        "pri.sparse_vector.value_count",
+                        "pri.store.size",
+                        "pri.suggest.current",
+                        "pri.suggest.time",
+                        "pri.suggest.total",
+                        "pri.warmer.current",
+                        "pri.warmer.total",
+                        "pri.warmer.total_time",
+                        "query_cache.evictions",
+                        "query_cache.memory_size",
+                        "refresh.external_time",
+                        "refresh.external_total",
+                        "refresh.listeners",
+                        "refresh.time",
+                        "refresh.total",
+                        "rep",
+                        "request_cache.evictions",
+                        "request_cache.hit_count",
+                        "request_cache.memory_size",
+                        "request_cache.miss_count",
+                        "search.fetch_current",
+                        "search.fetch_time",
+                        "search.fetch_total",
+                        "search.open_contexts",
+                        "search.query_current",
+                        "search.query_time",
+                        "search.query_total",
+                        "search.scroll_current",
+                        "search.scroll_time",
+                        "search.scroll_total",
+                        "segments.count",
+                        "segments.fixed_bitset_memory",
+                        "segments.index_writer_memory",
+                        "segments.memory",
+                        "segments.version_map_memory",
+                        "sparse_vector.value_count",
+                        "status",
+                        "store.size",
+                        "suggest.current",
+                        "suggest.time",
+                        "suggest.total",
+                        "uuid",
+                        "warmer.current",
+                        "warmer.total",
+                        "warmer.total_time",
+                    ],
+                ],
+            ]
+        ] = None,
+        health: t.Optional[
+            t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         include_unloaded_segments: t.Optional[bool] = None,
@@ -623,16 +1220,24 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use an index endpoint.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-indices.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param expand_wildcards: The type of index that wildcard patterns can match.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param health: The health status used to limit returned indices. By default,
             the response includes indices of any health status.
         :param help: When set to `true` will output available columns. This option can't
@@ -644,7 +1249,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
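A sketch of cat.indices exercising the expanded health filter and the unit options; connection details and the wildcard pattern are assumptions.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    try:
        # health now also accepts "unknown" and "unavailable"; bytes="mb" gives
        # integer store sizes and s sorts the table by that column.
        table = await client.cat.indices(
            index="*",
            health="yellow",
            bytes="mb",
            h=["health", "status", "index", "docs.count", "store.size"],
            s="store.size:desc",
            v=True,
        )
        print(table)
    finally:
        await client.close()


asyncio.run(main())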
@@ -701,16 +1311,27 @@ class CatClient(NamespacedClient):
     async def master(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[t.Union[str, t.Literal["host", "id", "ip", "node"]]],
+                t.Union[str, t.Literal["host", "id", "ip", "node"]],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -721,11 +1342,20 @@ class CatClient(NamespacedClient):
           <p>IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-master.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -736,11 +1366,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/master"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -761,6 +1399,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -899,12 +1539,19 @@ class CatClient(NamespacedClient):
           application consumption, use the get data frame analytics jobs statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics>`_
 
         :param id: The ID of the data frame analytics to fetch
         :param allow_no_match: Whether to ignore if a wildcard expression matches no
-            configs. (This includes `_all` string or when no configs have been specified)
-        :param bytes: The unit in which to display byte values
+            configs. (This includes the `_all` string or when no configs have been specified.)
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: Comma-separated list of column names to display.
@@ -912,7 +1559,12 @@ class CatClient(NamespacedClient):
             be combined with any other query string option.
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
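
Continuing with the same assumed `es` client inside the async context, the expanded `bytes` unit can be requested explicitly:

    # Byte-size columns rendered in megabytes; missing matches are tolerated.
    resp = await es.cat.ml_data_frame_analytics(
        bytes="mb", allow_no_match=True, format="json"
    )
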
@@ -963,6 +1615,9 @@ class CatClient(NamespacedClient):
         *,
         datafeed_id: t.Optional[str] = None,
         allow_no_match: t.Optional[bool] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
@@ -1067,7 +1722,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get datafeed statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-datafeeds.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed.
@@ -1078,6 +1733,14 @@ class CatClient(NamespacedClient):
             array when there are no matches and the subset of results when there are
             partial matches. If `false`, the API returns a 404 status code when there
             are no matches or only partial matches.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: Comma-separated list of column names to display.
@@ -1085,7 +1748,12 @@ class CatClient(NamespacedClient):
             be combined with any other query string option.
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -1098,6 +1766,8 @@ class CatClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if allow_no_match is not None:
             __query["allow_no_match"] = allow_no_match
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
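
A corresponding sketch for the datafeeds listing, which now accepts the same unit switches (same assumed `es` client):

    # Durations in seconds, byte sizes in kilobytes.
    resp = await es.cat.ml_datafeeds(bytes="kb", time="s", format="json")
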
@@ -1433,7 +2103,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get anomaly detection job statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-anomaly-detectors.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param allow_no_match: Specifies what to do when the request: * Contains wildcard
@@ -1443,7 +2113,14 @@ class CatClient(NamespacedClient):
             array when there are no matches and the subset of results when there are
             partial matches. If `false`, the API returns a 404 status code when there
             are no matches or only partial matches.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: Comma-separated list of column names to display.
@@ -1451,7 +2128,12 @@ class CatClient(NamespacedClient):
             be combined with any other query string option.
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
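
For the anomaly detection jobs listing, both unit parameters can be combined (same assumed client):

    # Byte sizes in gigabytes, durations in seconds.
    resp = await es.cat.ml_jobs(bytes="gb", time="s", format="json")
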
@@ -1618,7 +2300,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get trained models statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-trained-model.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models>`_
 
         :param model_id: A unique identifier for the trained model.
         :param allow_no_match: Specifies what to do when the request: contains wildcard
@@ -1628,7 +2310,14 @@ class CatClient(NamespacedClient):
             when there are no matches and the subset of results when there are partial
             matches. If `false`, the API returns a 404 status code when there are no
             matches or only partial matches.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param from_: Skips the specified number of transforms.
@@ -1638,7 +2327,12 @@ class CatClient(NamespacedClient):
         :param s: A comma-separated list of column names or aliases used to sort the
             response.
         :param size: The maximum number of transforms to display.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
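
The trained models listing keeps its paging parameters alongside the expanded unit controls (same assumed client; the page size is arbitrary):

    # First 50 trained models, byte sizes in megabytes.
    resp = await es.cat.ml_trained_models(from_=0, size=50, bytes="mb", format="json")
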
@@ -1691,16 +2385,39 @@ class CatClient(NamespacedClient):
     async def nodeattrs(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "attr", "host", "id", "ip", "node", "pid", "port", "value"
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "attr", "host", "id", "ip", "node", "pid", "port", "value"
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -1711,11 +2428,20 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-nodeattrs.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -1726,11 +2452,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/nodeattrs"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -1751,6 +2485,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -1773,8 +2509,201 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        full_id: t.Optional[t.Union[bool, str]] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        full_id: t.Optional[bool] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "build",
+                            "completion.size",
+                            "cpu",
+                            "disk.avail",
+                            "disk.total",
+                            "disk.used",
+                            "disk.used_percent",
+                            "fielddata.evictions",
+                            "fielddata.memory_size",
+                            "file_desc.current",
+                            "file_desc.max",
+                            "file_desc.percent",
+                            "flush.total",
+                            "flush.total_time",
+                            "get.current",
+                            "get.exists_time",
+                            "get.exists_total",
+                            "get.missing_time",
+                            "get.missing_total",
+                            "get.time",
+                            "get.total",
+                            "heap.current",
+                            "heap.max",
+                            "heap.percent",
+                            "http_address",
+                            "id",
+                            "indexing.delete_current",
+                            "indexing.delete_time",
+                            "indexing.delete_total",
+                            "indexing.index_current",
+                            "indexing.index_failed",
+                            "indexing.index_failed_due_to_version_conflict",
+                            "indexing.index_time",
+                            "indexing.index_total",
+                            "ip",
+                            "jdk",
+                            "load_15m",
+                            "load_1m",
+                            "load_5m",
+                            "mappings.total_count",
+                            "mappings.total_estimated_overhead_in_bytes",
+                            "master",
+                            "merges.current",
+                            "merges.current_docs",
+                            "merges.current_size",
+                            "merges.total",
+                            "merges.total_docs",
+                            "merges.total_size",
+                            "merges.total_time",
+                            "name",
+                            "node.role",
+                            "pid",
+                            "port",
+                            "query_cache.evictions",
+                            "query_cache.hit_count",
+                            "query_cache.memory_size",
+                            "query_cache.miss_count",
+                            "ram.current",
+                            "ram.max",
+                            "ram.percent",
+                            "refresh.time",
+                            "refresh.total",
+                            "request_cache.evictions",
+                            "request_cache.hit_count",
+                            "request_cache.memory_size",
+                            "request_cache.miss_count",
+                            "script.cache_evictions",
+                            "script.compilations",
+                            "search.fetch_current",
+                            "search.fetch_time",
+                            "search.fetch_total",
+                            "search.open_contexts",
+                            "search.query_current",
+                            "search.query_time",
+                            "search.query_total",
+                            "search.scroll_current",
+                            "search.scroll_time",
+                            "search.scroll_total",
+                            "segments.count",
+                            "segments.fixed_bitset_memory",
+                            "segments.index_writer_memory",
+                            "segments.memory",
+                            "segments.version_map_memory",
+                            "shard_stats.total_count",
+                            "suggest.current",
+                            "suggest.time",
+                            "suggest.total",
+                            "uptime",
+                            "version",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "build",
+                        "completion.size",
+                        "cpu",
+                        "disk.avail",
+                        "disk.total",
+                        "disk.used",
+                        "disk.used_percent",
+                        "fielddata.evictions",
+                        "fielddata.memory_size",
+                        "file_desc.current",
+                        "file_desc.max",
+                        "file_desc.percent",
+                        "flush.total",
+                        "flush.total_time",
+                        "get.current",
+                        "get.exists_time",
+                        "get.exists_total",
+                        "get.missing_time",
+                        "get.missing_total",
+                        "get.time",
+                        "get.total",
+                        "heap.current",
+                        "heap.max",
+                        "heap.percent",
+                        "http_address",
+                        "id",
+                        "indexing.delete_current",
+                        "indexing.delete_time",
+                        "indexing.delete_total",
+                        "indexing.index_current",
+                        "indexing.index_failed",
+                        "indexing.index_failed_due_to_version_conflict",
+                        "indexing.index_time",
+                        "indexing.index_total",
+                        "ip",
+                        "jdk",
+                        "load_15m",
+                        "load_1m",
+                        "load_5m",
+                        "mappings.total_count",
+                        "mappings.total_estimated_overhead_in_bytes",
+                        "master",
+                        "merges.current",
+                        "merges.current_docs",
+                        "merges.current_size",
+                        "merges.total",
+                        "merges.total_docs",
+                        "merges.total_size",
+                        "merges.total_time",
+                        "name",
+                        "node.role",
+                        "pid",
+                        "port",
+                        "query_cache.evictions",
+                        "query_cache.hit_count",
+                        "query_cache.memory_size",
+                        "query_cache.miss_count",
+                        "ram.current",
+                        "ram.max",
+                        "ram.percent",
+                        "refresh.time",
+                        "refresh.total",
+                        "request_cache.evictions",
+                        "request_cache.hit_count",
+                        "request_cache.memory_size",
+                        "request_cache.miss_count",
+                        "script.cache_evictions",
+                        "script.compilations",
+                        "search.fetch_current",
+                        "search.fetch_time",
+                        "search.fetch_total",
+                        "search.open_contexts",
+                        "search.query_current",
+                        "search.query_time",
+                        "search.query_total",
+                        "search.scroll_current",
+                        "search.scroll_time",
+                        "search.scroll_total",
+                        "segments.count",
+                        "segments.fixed_bitset_memory",
+                        "segments.index_writer_memory",
+                        "segments.memory",
+                        "segments.version_map_memory",
+                        "shard_stats.total_count",
+                        "suggest.current",
+                        "suggest.time",
+                        "suggest.total",
+                        "uptime",
+                        "version",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         include_unloaded_segments: t.Optional[bool] = None,
@@ -1794,23 +2723,36 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-nodes.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes>`_
 
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param full_id: If `true`, return the full node ID. If `false`, return the shortened
             node ID.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param include_unloaded_segments: If true, the response includes information
             from segments that are not loaded into memory.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
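
A sketch of the narrowed `full_id` (now strictly boolean) together with the typed column names (same assumed client):

    # Full node IDs plus a handful of the documented columns.
    resp = await es.cat.nodes(
        full_id=True,
        h=["name", "heap.percent", "load_1m", "node.role"],
        format="json",
    )
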
@@ -1858,10 +2800,25 @@ class CatClient(NamespacedClient):
     async def pending_tasks(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal["insertOrder", "priority", "source", "timeInQueue"],
+                    ]
+                ],
+                t.Union[
+                    str, t.Literal["insertOrder", "priority", "source", "timeInQueue"]
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
@@ -1881,11 +2838,20 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-pending-tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -1896,12 +2862,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/pending_tasks"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
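
The pending tasks columns are now enumerated as literals, so a call can spell them out (same assumed client):

    # Queue time rendered in seconds.
    resp = await es.cat.pending_tasks(
        h=["insertOrder", "priority", "source", "timeInQueue"], time="s", format="json"
    )
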
@@ -1940,10 +2913,25 @@ class CatClient(NamespacedClient):
     async def plugins(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal["component", "description", "id", "name", "version"],
+                    ]
+                ],
+                t.Union[
+                    str, t.Literal["component", "description", "id", "name", "version"]
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         include_bootstrap: t.Optional[bool] = None,
@@ -1951,6 +2939,9 @@ class CatClient(NamespacedClient):
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -1961,11 +2952,20 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-plugins.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param include_bootstrap: Include bootstrap plugins in the response
@@ -1977,11 +2977,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/plugins"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2004,6 +3012,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2029,7 +3039,74 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "bytes",
+                            "bytes_percent",
+                            "bytes_recovered",
+                            "bytes_total",
+                            "files",
+                            "files_percent",
+                            "files_recovered",
+                            "files_total",
+                            "index",
+                            "repository",
+                            "shard",
+                            "snapshot",
+                            "source_host",
+                            "source_node",
+                            "stage",
+                            "start_time",
+                            "start_time_millis",
+                            "stop_time",
+                            "stop_time_millis",
+                            "target_host",
+                            "target_node",
+                            "time",
+                            "translog_ops",
+                            "translog_ops_percent",
+                            "translog_ops_recovered",
+                            "type",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "bytes",
+                        "bytes_percent",
+                        "bytes_recovered",
+                        "bytes_total",
+                        "files",
+                        "files_percent",
+                        "files_recovered",
+                        "files_total",
+                        "index",
+                        "repository",
+                        "shard",
+                        "snapshot",
+                        "source_host",
+                        "source_node",
+                        "stage",
+                        "start_time",
+                        "start_time_millis",
+                        "stop_time",
+                        "stop_time_millis",
+                        "target_host",
+                        "target_node",
+                        "time",
+                        "translog_ops",
+                        "translog_ops_percent",
+                        "translog_ops_recovered",
+                        "type",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
@@ -2049,24 +3126,37 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-recovery.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
         :param active_only: If `true`, the response only includes ongoing shard recoveries.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param detailed: If `true`, the response includes detailed information about
             shard recoveries.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
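
A recovery call combining the existing filters with the expanded byte-size units (same assumed client; the index pattern is illustrative):

    # Ongoing recoveries only, byte columns in megabytes.
    resp = await es.cat.recovery(
        index="my-index-*", active_only=True, bytes="mb", format="json"
    )
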
@@ -2117,6 +3207,9 @@ class CatClient(NamespacedClient):
     async def repositories(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
@@ -2127,6 +3220,9 @@ class CatClient(NamespacedClient):
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -2137,8 +3233,16 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-repositories.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: List of columns to appear in the response. Supports simple wildcards.
@@ -2152,11 +3256,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/repositories"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2177,6 +3289,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2200,13 +3314,61 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "committed",
+                            "compound",
+                            "docs.count",
+                            "docs.deleted",
+                            "generation",
+                            "id",
+                            "index",
+                            "ip",
+                            "prirep",
+                            "searchable",
+                            "segment",
+                            "shard",
+                            "size",
+                            "size.memory",
+                            "version",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "committed",
+                        "compound",
+                        "docs.count",
+                        "docs.deleted",
+                        "generation",
+                        "id",
+                        "index",
+                        "ip",
+                        "prirep",
+                        "searchable",
+                        "segment",
+                        "shard",
+                        "size",
+                        "size.memory",
+                        "version",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -2218,15 +3380,23 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-segments.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -2234,9 +3404,15 @@ class CatClient(NamespacedClient):
             from the cluster state of the master node. In both cases the coordinating
             node will send requests for further information to each selected node.
         :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2269,6 +3445,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2292,7 +3470,162 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "completion.size",
+                            "dataset.size",
+                            "dense_vector.value_count",
+                            "docs",
+                            "dsparse_vector.value_count",
+                            "fielddata.evictions",
+                            "fielddata.memory_size",
+                            "flush.total",
+                            "flush.total_time",
+                            "get.current",
+                            "get.exists_time",
+                            "get.exists_total",
+                            "get.missing_time",
+                            "get.missing_total",
+                            "get.time",
+                            "get.total",
+                            "id",
+                            "index",
+                            "indexing.delete_current",
+                            "indexing.delete_time",
+                            "indexing.delete_total",
+                            "indexing.index_current",
+                            "indexing.index_failed",
+                            "indexing.index_failed_due_to_version_conflict",
+                            "indexing.index_time",
+                            "indexing.index_total",
+                            "ip",
+                            "merges.current",
+                            "merges.current_docs",
+                            "merges.current_size",
+                            "merges.total",
+                            "merges.total_docs",
+                            "merges.total_size",
+                            "merges.total_time",
+                            "node",
+                            "prirep",
+                            "query_cache.evictions",
+                            "query_cache.memory_size",
+                            "recoverysource.type",
+                            "refresh.time",
+                            "refresh.total",
+                            "search.fetch_current",
+                            "search.fetch_time",
+                            "search.fetch_total",
+                            "search.open_contexts",
+                            "search.query_current",
+                            "search.query_time",
+                            "search.query_total",
+                            "search.scroll_current",
+                            "search.scroll_time",
+                            "search.scroll_total",
+                            "segments.count",
+                            "segments.fixed_bitset_memory",
+                            "segments.index_writer_memory",
+                            "segments.memory",
+                            "segments.version_map_memory",
+                            "seq_no.global_checkpoint",
+                            "seq_no.local_checkpoint",
+                            "seq_no.max",
+                            "shard",
+                            "state",
+                            "store",
+                            "suggest.current",
+                            "suggest.time",
+                            "suggest.total",
+                            "sync_id",
+                            "unassigned.at",
+                            "unassigned.details",
+                            "unassigned.for",
+                            "unassigned.reason",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "completion.size",
+                        "dataset.size",
+                        "dense_vector.value_count",
+                        "docs",
+                        "dsparse_vector.value_count",
+                        "fielddata.evictions",
+                        "fielddata.memory_size",
+                        "flush.total",
+                        "flush.total_time",
+                        "get.current",
+                        "get.exists_time",
+                        "get.exists_total",
+                        "get.missing_time",
+                        "get.missing_total",
+                        "get.time",
+                        "get.total",
+                        "id",
+                        "index",
+                        "indexing.delete_current",
+                        "indexing.delete_time",
+                        "indexing.delete_total",
+                        "indexing.index_current",
+                        "indexing.index_failed",
+                        "indexing.index_failed_due_to_version_conflict",
+                        "indexing.index_time",
+                        "indexing.index_total",
+                        "ip",
+                        "merges.current",
+                        "merges.current_docs",
+                        "merges.current_size",
+                        "merges.total",
+                        "merges.total_docs",
+                        "merges.total_size",
+                        "merges.total_time",
+                        "node",
+                        "prirep",
+                        "query_cache.evictions",
+                        "query_cache.memory_size",
+                        "recoverysource.type",
+                        "refresh.time",
+                        "refresh.total",
+                        "search.fetch_current",
+                        "search.fetch_time",
+                        "search.fetch_total",
+                        "search.open_contexts",
+                        "search.query_current",
+                        "search.query_time",
+                        "search.query_total",
+                        "search.scroll_current",
+                        "search.scroll_time",
+                        "search.scroll_total",
+                        "segments.count",
+                        "segments.fixed_bitset_memory",
+                        "segments.index_writer_memory",
+                        "segments.memory",
+                        "segments.version_map_memory",
+                        "seq_no.global_checkpoint",
+                        "seq_no.local_checkpoint",
+                        "seq_no.max",
+                        "shard",
+                        "state",
+                        "store",
+                        "suggest.current",
+                        "suggest.time",
+                        "suggest.total",
+                        "sync_id",
+                        "unassigned.at",
+                        "unassigned.details",
+                        "unassigned.for",
+                        "unassigned.reason",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
@@ -2312,22 +3645,34 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-shards.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: List of columns to appear in the response. Supports simple wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
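
The `bytes` and `time` behaviour described in the docstring above is easiest to see in a concrete call. The following is a minimal sketch and not part of this diff: the connection URL, credentials, and index pattern are placeholders; with `bytes="kb"` and `time="s"`, byte and duration columns come back as plain integers rounded down to the chosen unit.

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def main() -> None:
        # Connection URL and credentials are illustrative placeholders.
        client = AsyncElasticsearch(
            "https://localhost:9200", basic_auth=("elastic", "changeme")
        )
        # Fixed column set; bytes/time render integer values with no unit suffix.
        resp = await client.cat.shards(
            index="my-index-*",
            h=["index", "shard", "prirep", "state", "docs", "store"],
            bytes="kb",
            time="s",
            s="store:desc",
            format="json",
        )
        print(resp.body)
        await client.close()


    asyncio.run(main())
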
@@ -2377,10 +3722,54 @@ class CatClient(NamespacedClient):
         self,
         *,
         repository: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "duration",
+                            "end_epoch",
+                            "end_time",
+                            "failed_shards",
+                            "id",
+                            "indices",
+                            "reason",
+                            "repository",
+                            "start_epoch",
+                            "start_time",
+                            "status",
+                            "successful_shards",
+                            "total_shards",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "duration",
+                        "end_epoch",
+                        "end_time",
+                        "failed_shards",
+                        "id",
+                        "indices",
+                        "reason",
+                        "repository",
+                        "start_epoch",
+                        "start_time",
+                        "status",
+                        "successful_shards",
+                        "total_shards",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
@@ -2401,14 +3790,23 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-snapshots.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots>`_
 
         :param repository: A comma-separated list of snapshot repositories used to limit
             the request. Accepts wildcard expressions. `_all` returns all repositories.
             If any repository fails during the request, Elasticsearch returns an error.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param ignore_unavailable: If `true`, the response does not include information
@@ -2417,7 +3815,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2428,6 +3831,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/snapshots"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
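
A short usage sketch for the `bytes` parameter newly accepted by `cat.snapshots`; the repository name is a placeholder and `client` is assumed to be an `AsyncElasticsearch` instance as in the earlier sketch.

    from elasticsearch import AsyncElasticsearch


    async def list_snapshots(client: AsyncElasticsearch) -> None:
        # "my-backups" is a placeholder repository name.
        resp = await client.cat.snapshots(
            repository="my-backups",
            h=["id", "status", "start_time", "duration", "total_shards", "failed_shards"],
            bytes="mb",
            time="s",
            format="json",
        )
        print(resp.body)
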
@@ -2468,11 +3873,59 @@ class CatClient(NamespacedClient):
         self,
         *,
         actions: t.Optional[t.Sequence[str]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         detailed: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "action",
+                            "id",
+                            "ip",
+                            "node",
+                            "node_id",
+                            "parent_task_id",
+                            "port",
+                            "running_time",
+                            "running_time_ns",
+                            "start_time",
+                            "task_id",
+                            "timestamp",
+                            "type",
+                            "version",
+                            "x_opaque_id",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "action",
+                        "id",
+                        "ip",
+                        "node",
+                        "node_id",
+                        "parent_task_id",
+                        "port",
+                        "running_time",
+                        "running_time_ns",
+                        "start_time",
+                        "task_id",
+                        "timestamp",
+                        "type",
+                        "version",
+                        "x_opaque_id",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         nodes: t.Optional[t.Sequence[str]] = None,
@@ -2494,14 +3947,23 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks>`_
 
         :param actions: The task action names, which are used to limit the response.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param detailed: If `true`, the response includes detailed information about
             the running tasks.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param nodes: Unique node identifiers, which are used to limit the response.
@@ -2510,7 +3972,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         :param v: When set to `true` will enable verbose output.
@@ -2522,6 +3989,8 @@ class CatClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if actions is not None:
             __query["actions"] = actions
+        if bytes is not None:
+            __query["bytes"] = bytes
         if detailed is not None:
             __query["detailed"] = detailed
         if error_trace is not None:
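
A hedged sketch of the updated `cat.tasks` signature; the action patterns are illustrative and `client` is again an assumed `AsyncElasticsearch` instance.

    from elasticsearch import AsyncElasticsearch


    async def show_running_tasks(client: AsyncElasticsearch) -> None:
        # Limit the listing to search and reindex actions (wildcard patterns).
        resp = await client.cat.tasks(
            actions=["*search*", "*reindex*"],
            detailed=True,
            h=["action", "task_id", "parent_task_id", "running_time", "node"],
            time="ms",
            format="json",
        )
        print(resp.body)
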
@@ -2567,16 +4036,39 @@ class CatClient(NamespacedClient):
         self,
         *,
         name: t.Optional[str] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "composed_of", "index_patterns", "name", "order", "version"
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "composed_of", "index_patterns", "name", "order", "version"
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -2588,13 +4080,22 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-templates.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates>`_
 
         :param name: The name of the template to return. Accepts wildcard expressions.
             If omitted, all templates are returned.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -2605,6 +4106,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2615,6 +4122,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/templates"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2635,6 +4144,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2652,10 +4163,68 @@ class CatClient(NamespacedClient):
         self,
         *,
         thread_pool_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "active",
+                            "completed",
+                            "core",
+                            "ephemeral_id",
+                            "host",
+                            "ip",
+                            "keep_alive",
+                            "largest",
+                            "max",
+                            "name",
+                            "node_id",
+                            "node_name",
+                            "pid",
+                            "pool_size",
+                            "port",
+                            "queue",
+                            "queue_size",
+                            "rejected",
+                            "size",
+                            "type",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "active",
+                        "completed",
+                        "core",
+                        "ephemeral_id",
+                        "host",
+                        "ip",
+                        "keep_alive",
+                        "largest",
+                        "max",
+                        "name",
+                        "node_id",
+                        "node_name",
+                        "pid",
+                        "pool_size",
+                        "port",
+                        "queue",
+                        "queue_size",
+                        "rejected",
+                        "size",
+                        "type",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
@@ -2676,10 +4245,18 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-thread-pool.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool>`_
 
         :param thread_pool_patterns: A comma-separated list of thread pool names used
             to limit the request. Accepts wildcard expressions.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: List of columns to appear in the response. Supports simple wildcards.
@@ -2689,11 +4266,16 @@ class CatClient(NamespacedClient):
             the local cluster state. If `false` the list of selected nodes are computed
             from the cluster state of the master node. In both cases the coordinating
             node will send requests for further information to each selected node.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: The unit used to display time values.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2704,6 +4286,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/thread_pool"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
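
A minimal sketch of `cat.thread_pool` with the typed `h` columns; the pool names refer to built-in thread pools, while the `client` instance is assumed.

    from elasticsearch import AsyncElasticsearch


    async def check_queues(client: AsyncElasticsearch) -> None:
        # Restrict output to the write and search pools on every node.
        resp = await client.cat.thread_pool(
            thread_pool_patterns=["write", "search"],
            h=["node_name", "name", "active", "queue", "rejected", "completed"],
            s=["node_name", "name"],
            format="json",
        )
        print(resp.body)
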
@@ -2746,6 +4330,9 @@ class CatClient(NamespacedClient):
         *,
         transform_id: t.Optional[str] = None,
         allow_no_match: t.Optional[bool] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
@@ -2933,7 +4520,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get transform statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-transforms.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms>`_
 
         :param transform_id: A transform identifier or a wildcard expression. If you
             do not specify one of these options, the API returns information for all
@@ -2945,6 +4532,14 @@ class CatClient(NamespacedClient):
             array when there are no matches and the subset of results when there are
             partial matches. If `false`, the request returns a 404 status code when there
             are no matches or only partial matches.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param from_: Skips the specified number of transforms.
@@ -2954,7 +4549,12 @@ class CatClient(NamespacedClient):
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
         :param size: The maximum number of transforms to obtain.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2967,6 +4567,8 @@ class CatClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if allow_no_match is not None:
             __query["allow_no_match"] = allow_no_match
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
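
A sketch of `cat.transforms` with the newly added `bytes` parameter; the transform identifier is a placeholder.

    from elasticsearch import AsyncElasticsearch


    async def list_transforms(client: AsyncElasticsearch) -> None:
        # Wildcard transform id; allow_no_match avoids a 404 when nothing matches.
        resp = await client.cat.transforms(
            transform_id="ecommerce-*",
            allow_no_match=True,
            bytes="kb",
            time="s",
            format="json",
        )
        print(resp.body)
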
diff -pruN 8.17.2-2/elasticsearch/_async/client/ccr.py 9.2.0-1/elasticsearch/_async/client/ccr.py
--- 8.17.2-2/elasticsearch/_async/client/ccr.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/ccr.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,18 +33,23 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Delete auto-follow patterns.
-          Delete a collection of cross-cluster replication auto-follow patterns.</p>
+          <p>Delete auto-follow patterns.</p>
+          <p>Delete a collection of cross-cluster replication auto-follow patterns.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-delete-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern>`_
 
-        :param name: The name of the auto follow pattern.
+        :param name: The auto-follow pattern collection to delete.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -57,6 +62,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -72,6 +79,8 @@ class CcrClient(NamespacedClient):
     @_rewrite_parameters(
         body_fields=(
             "leader_index",
+            "remote_cluster",
+            "data_stream_name",
             "max_outstanding_read_requests",
             "max_outstanding_write_requests",
             "max_read_request_operation_count",
@@ -82,29 +91,32 @@ class CcrClient(NamespacedClient):
             "max_write_request_operation_count",
             "max_write_request_size",
             "read_poll_timeout",
-            "remote_cluster",
+            "settings",
         ),
     )
     async def follow(
         self,
         *,
         index: str,
+        leader_index: t.Optional[str] = None,
+        remote_cluster: t.Optional[str] = None,
+        data_stream_name: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        leader_index: t.Optional[str] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_outstanding_read_requests: t.Optional[int] = None,
         max_outstanding_write_requests: t.Optional[int] = None,
         max_read_request_operation_count: t.Optional[int] = None,
-        max_read_request_size: t.Optional[str] = None,
+        max_read_request_size: t.Optional[t.Union[int, str]] = None,
         max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_write_buffer_count: t.Optional[int] = None,
-        max_write_buffer_size: t.Optional[str] = None,
+        max_write_buffer_size: t.Optional[t.Union[int, str]] = None,
         max_write_request_operation_count: t.Optional[int] = None,
-        max_write_request_size: t.Optional[str] = None,
+        max_write_request_size: t.Optional[t.Union[int, str]] = None,
         pretty: t.Optional[bool] = None,
         read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        remote_cluster: t.Optional[str] = None,
+        settings: t.Optional[t.Mapping[str, t.Any]] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
         ] = None,
@@ -118,28 +130,54 @@ class CcrClient(NamespacedClient):
           When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-put-follow.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow>`_
 
-        :param index: The name of the follower index
-        :param leader_index:
-        :param max_outstanding_read_requests:
-        :param max_outstanding_write_requests:
-        :param max_read_request_operation_count:
-        :param max_read_request_size:
-        :param max_retry_delay:
-        :param max_write_buffer_count:
-        :param max_write_buffer_size:
-        :param max_write_request_operation_count:
-        :param max_write_request_size:
-        :param read_poll_timeout:
-        :param remote_cluster:
-        :param wait_for_active_shards: Sets the number of shard copies that must be active
-            before returning. Defaults to 0. Set to `all` for all shard copies, otherwise
-            set to any non-negative value less than or equal to the total number of copies
-            for the shard (number of replicas + 1)
+        :param index: The name of the follower index.
+        :param leader_index: The name of the index in the leader cluster to follow.
+        :param remote_cluster: The remote cluster containing the leader index.
+        :param data_stream_name: If the leader index is part of a data stream, the name
+            to which the local data stream for the followed index should be renamed.
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param max_outstanding_read_requests: The maximum number of outstanding read
+            requests from the remote cluster.
+        :param max_outstanding_write_requests: The maximum number of outstanding write
+            requests on the follower.
+        :param max_read_request_operation_count: The maximum number of operations to
+            pull per read from the remote cluster.
+        :param max_read_request_size: The maximum size in bytes per read of a batch
+            of operations pulled from the remote cluster.
+        :param max_retry_delay: The maximum time to wait before retrying an operation
+            that failed exceptionally. An exponential backoff strategy is employed when
+            retrying.
+        :param max_write_buffer_count: The maximum number of operations that can be queued
+            for writing. When this limit is reached, reads from the remote cluster will
+            be deferred until the number of queued operations goes below the limit.
+        :param max_write_buffer_size: The maximum total bytes of operations that can
+            be queued for writing. When this limit is reached, reads from the remote
+            cluster will be deferred until the total bytes of queued operations goes
+            below the limit.
+        :param max_write_request_operation_count: The maximum number of operations per
+            bulk write request executed on the follower.
+        :param max_write_request_size: The maximum total bytes of operations per bulk
+            write request executed on the follower.
+        :param read_poll_timeout: The maximum time to wait for new operations on the
+            remote cluster when the follower index is synchronized with the leader index.
+            When the timeout has elapsed, the poll for operations will return to the
+            follower so that it can update some statistics. Then the follower will immediately
+            attempt to read from the leader again.
+        :param settings: Settings to override from the leader index.
+        :param wait_for_active_shards: Specifies the number of shards to wait on being
+            active before responding. This defaults to waiting on none of the shards
+            to be active. A shard must be restored from the leader index before being
+            active. Restoring a follower shard requires transferring all the remote Lucene
+            segment files to the follower index.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
+        if leader_index is None and body is None:
+            raise ValueError("Empty value passed for parameter 'leader_index'")
+        if remote_cluster is None and body is None:
+            raise ValueError("Empty value passed for parameter 'remote_cluster'")
         __path_parts: t.Dict[str, str] = {"index": _quote(index)}
         __path = f'/{__path_parts["index"]}/_ccr/follow'
         __query: t.Dict[str, t.Any] = {}
@@ -150,6 +188,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if wait_for_active_shards is not None:
@@ -157,6 +197,10 @@ class CcrClient(NamespacedClient):
         if not __body:
             if leader_index is not None:
                 __body["leader_index"] = leader_index
+            if remote_cluster is not None:
+                __body["remote_cluster"] = remote_cluster
+            if data_stream_name is not None:
+                __body["data_stream_name"] = data_stream_name
             if max_outstanding_read_requests is not None:
                 __body["max_outstanding_read_requests"] = max_outstanding_read_requests
             if max_outstanding_write_requests is not None:
@@ -183,8 +227,8 @@ class CcrClient(NamespacedClient):
                 __body["max_write_request_size"] = max_write_request_size
             if read_poll_timeout is not None:
                 __body["read_poll_timeout"] = read_poll_timeout
-            if remote_cluster is not None:
-                __body["remote_cluster"] = remote_cluster
+            if settings is not None:
+                __body["settings"] = settings
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
@@ -204,20 +248,24 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get follower information.
-          Get information about all cross-cluster replication follower indices.
+          <p>Get follower information.</p>
+          <p>Get information about all cross-cluster replication follower indices.
           For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-follow-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info>`_
 
-        :param index: A comma-separated list of index patterns; use `_all` to perform
-            the operation on all indices
+        :param index: A comma-delimited list of follower index patterns.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -230,6 +278,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -251,19 +301,21 @@ class CcrClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get follower stats.
-          Get cross-cluster replication follower stats.
+          <p>Get follower stats.</p>
+          <p>Get cross-cluster replication follower stats.
           The API returns shard-level stats about the &quot;following tasks&quot; associated with each shard for the specified indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-follow-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats>`_
 
-        :param index: A comma-separated list of index patterns; use `_all` to perform
-            the operation on all indices
+        :param index: A comma-delimited list of index patterns.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -278,6 +330,8 @@ class CcrClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -308,6 +362,7 @@ class CcrClient(NamespacedClient):
         human: t.Optional[bool] = None,
         leader_remote_cluster: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -325,7 +380,7 @@ class CcrClient(NamespacedClient):
           The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-forget-follower.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower>`_
 
         :param index: the name of the leader index for which specified follower retention
             leases should be removed
@@ -333,6 +388,8 @@ class CcrClient(NamespacedClient):
         :param follower_index:
         :param follower_index_uuid:
         :param leader_remote_cluster:
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -348,6 +405,8 @@ class CcrClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         if not __body:
             if follower_cluster is not None:
                 __body["follower_cluster"] = follower_cluster
@@ -376,19 +435,24 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get auto-follow patterns.
-          Get cross-cluster replication auto-follow patterns.</p>
+          <p>Get auto-follow patterns.</p>
+          <p>Get cross-cluster replication auto-follow patterns.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1>`_
 
-        :param name: Specifies the auto-follow pattern collection that you want to retrieve.
-            If you do not specify a name, the API returns information for all collections.
+        :param name: The auto-follow pattern collection that you want to retrieve. If
+            you do not specify a name, the API returns information for all collections.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -404,6 +468,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -424,13 +490,14 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Pause an auto-follow pattern.
-          Pause a cross-cluster replication auto-follow pattern.
+          <p>Pause an auto-follow pattern.</p>
+          <p>Pause a cross-cluster replication auto-follow pattern.
           When the API returns, the auto-follow pattern is inactive.
           New indices that are created on the remote cluster and match the auto-follow patterns are ignored.</p>
           <p>You can resume auto-following with the resume auto-follow pattern API.
@@ -438,10 +505,13 @@ class CcrClient(NamespacedClient):
           Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-pause-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern>`_
 
-        :param name: The name of the auto follow pattern that should pause discovering
-            new indices to follow.
+        :param name: The name of the auto-follow pattern to pause.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -454,6 +524,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -474,22 +546,26 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Pause a follower.
-          Pause a cross-cluster replication follower index.
+          <p>Pause a follower.</p>
+          <p>Pause a cross-cluster replication follower index.
           The follower index will not fetch any additional operations from the leader index.
           You can resume following with the resume follower API.
           You can pause and resume a follower index to change the configuration of the following task.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-pause-follow.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow>`_
 
-        :param index: The name of the follower index that should pause following its
-            leader index.
+        :param index: The name of the follower index.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -502,6 +578,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -544,6 +622,7 @@ class CcrClient(NamespacedClient):
         human: t.Optional[bool] = None,
         leader_index_exclusion_patterns: t.Optional[t.Sequence[str]] = None,
         leader_index_patterns: t.Optional[t.Sequence[str]] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_outstanding_read_requests: t.Optional[int] = None,
         max_outstanding_write_requests: t.Optional[int] = None,
         max_read_request_operation_count: t.Optional[int] = None,
@@ -569,7 +648,7 @@ class CcrClient(NamespacedClient):
           NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-put-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern>`_
 
         :param name: The name of the collection of auto-follow patterns.
         :param remote_cluster: The remote cluster containing the leader indices to match
@@ -584,6 +663,7 @@ class CcrClient(NamespacedClient):
             or more leader_index_exclusion_patterns won’t be followed.
         :param leader_index_patterns: An array of simple index patterns to match against
             indices in the remote cluster specified by the remote_cluster field.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param max_outstanding_read_requests: The maximum number of outstanding reads
             requests from the remote cluster.
         :param max_outstanding_write_requests: The maximum number of outstanding reads
@@ -628,6 +708,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -688,21 +770,25 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Resume an auto-follow pattern.
-          Resume a cross-cluster replication auto-follow pattern that was paused.
+          <p>Resume an auto-follow pattern.</p>
+          <p>Resume a cross-cluster replication auto-follow pattern that was paused.
           The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.
           Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-resume-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern>`_
 
-        :param name: The name of the auto follow pattern to resume discovering new indices
-            to follow.
+        :param name: The name of the auto-follow pattern to resume.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -715,6 +801,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -748,6 +836,7 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_outstanding_read_requests: t.Optional[int] = None,
         max_outstanding_write_requests: t.Optional[int] = None,
         max_read_request_operation_count: t.Optional[int] = None,
@@ -771,9 +860,10 @@ class CcrClient(NamespacedClient):
           When this API returns, the follower index will resume fetching operations from the leader index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-resume-follow.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow>`_
 
         :param index: The name of the follow index to resume following.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param max_outstanding_read_requests:
         :param max_outstanding_write_requests:
         :param max_read_request_operation_count:
@@ -797,6 +887,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -848,16 +940,25 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get cross-cluster replication stats.
-          This API returns stats about auto-following and the same shard-level stats as the get follower stats API.</p>
+          <p>Get cross-cluster replication stats.</p>
+          <p>This API returns stats about auto-following and the same shard-level stats as the get follower stats API.</p>
+
 
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats>`_
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-stats.html>`_
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ccr/stats"
@@ -868,8 +969,12 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -888,22 +993,29 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Unfollow an index.
-          Convert a cross-cluster replication follower index to a regular index.
+          <p>Unfollow an index.</p>
+          <p>Convert a cross-cluster replication follower index to a regular index.
           The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
           The follower index must be paused and closed before you call the unfollow API.</p>
-          <p>NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.</p>
-
+          <blockquote>
+          <p>info
+          Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.</p>
+          </blockquote>
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-unfollow.html>`_
 
-        :param index: The name of the follower index that should be turned into a regular
-            index.
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow>`_
+
+        :param index: The name of the follower index.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -916,6 +1028,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
diff -pruN 8.17.2-2/elasticsearch/_async/client/cluster.py 9.2.0-1/elasticsearch/_async/client/cluster.py
--- 8.17.2-2/elasticsearch/_async/client/cluster.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/cluster.py	2025-10-28 16:50:53.000000000 +0000
@@ -38,6 +38,7 @@ class ClusterClient(NamespacedClient):
         include_disk_info: t.Optional[bool] = None,
         include_yes_decisions: t.Optional[bool] = None,
         index: t.Optional[str] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         primary: t.Optional[bool] = None,
         shard: t.Optional[int] = None,
@@ -48,23 +49,25 @@ class ClusterClient(NamespacedClient):
 
           <p>Explain the shard allocations.
           Get explanations for shard allocations in the cluster.
+          This API accepts the current_node, index, primary and shard parameters in the request body or in query parameters, but not in both at the same time.
           For unassigned shards, it provides an explanation for why the shard is unassigned.
           For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node.
-          This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.</p>
+          This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
+          Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-allocation-explain.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain>`_
 
-        :param current_node: Specifies the node ID or the name of the node to only explain
-            a shard that is currently located on the specified node.
+        :param current_node: Explain a shard only if it is currently located on the specified
+            node name or node ID.
         :param include_disk_info: If true, returns information about disk usage and shard
             sizes.
         :param include_yes_decisions: If true, returns YES decisions in explanation.
-        :param index: Specifies the name of the index that you would like an explanation
-            for.
-        :param primary: If true, returns explanation for the primary shard for the given
-            shard ID.
-        :param shard: Specifies the ID of the shard that you would like an explanation
+        :param index: The name of the index that you would like an explanation for.
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param primary: If true, returns an explanation for the primary shard for the
+            specified shard ID.
+        :param shard: An identifier for the shard that you would like an explanation
             for.
         """
         __path_parts: t.Dict[str, str] = {}
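
The allocation-explain docstring above notes that `current_node`, `index`, `primary`, and `shard` may be sent in the request body or as query parameters (not both), and that a `master_timeout` query parameter is now accepted. A minimal sketch, assuming an `AsyncElasticsearch` instance `es` and a placeholder index name:

    # Ask why the primary of shard 0 of "my-index" is (or is not) allocated where it is.
    explanation = await es.cluster.allocation_explain(
        index="my-index", shard=0, primary=True, master_timeout="30s"
    )
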
@@ -81,6 +84,8 @@ class ClusterClient(NamespacedClient):
             __query["include_disk_info"] = include_disk_info
         if include_yes_decisions is not None:
             __query["include_yes_decisions"] = include_yes_decisions
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -126,7 +131,7 @@ class ClusterClient(NamespacedClient):
           Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Comma-separated list or wildcard expression of component template
             names used to limit the request.
@@ -170,6 +175,7 @@ class ClusterClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         wait_for_removal: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -180,8 +186,9 @@ class ClusterClient(NamespacedClient):
           Remove master-eligible nodes from the voting configuration exclusion list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/voting-config-exclusions.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions>`_
 
+        :param master_timeout: Period to wait for a connection to the master node.
         :param wait_for_removal: Specifies whether to wait for all excluded nodes to
             be removed from the cluster before clearing the voting configuration exclusions
             list. Defaults to true, meaning that all excluded nodes must be removed from
@@ -198,6 +205,8 @@ class ClusterClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if wait_for_removal is not None:
@@ -231,7 +240,7 @@ class ClusterClient(NamespacedClient):
           Returns information about whether a particular component template exists.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Comma-separated list of component template names used to limit the
             request. Wildcard (*) expressions are supported.
@@ -282,6 +291,7 @@ class ClusterClient(NamespacedClient):
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        settings_filter: t.Optional[t.Union[str, t.Sequence[str]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -290,7 +300,7 @@ class ClusterClient(NamespacedClient):
           Get information about component templates.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Comma-separated list of component template names used to limit the
             request. Wildcard (`*`) expressions are supported.
@@ -302,6 +312,8 @@ class ClusterClient(NamespacedClient):
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
+        :param settings_filter: Filter out results, for example to filter out sensitive
+            information. Supports wildcards or full settings keys.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -327,6 +339,8 @@ class ClusterClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if settings_filter is not None:
+            __query["settings_filter"] = settings_filter
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -353,15 +367,20 @@ class ClusterClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get cluster-wide settings.
-          By default, it returns only settings that have been explicitly defined.</p>
+          <p>Get cluster-wide settings.</p>
+          <p>By default, it returns only settings that have been explicitly defined.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings>`_
 
         :param flat_settings: If `true`, returns settings in flat format.
-        :param include_defaults: If `true`, returns default cluster settings from the
-            local node.
+        :param include_defaults: If `true`, also returns default values for all other
+            cluster settings, reflecting the values in the `elasticsearch.yml` file of
+            one of the nodes in the cluster. If the nodes in your cluster do not all
+            have the same values in their `elasticsearch.yml` config files then the values
+            returned by this API may vary from invocation to invocation and may not reflect
+            the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings`
+            API to fetch the settings for each individual node in your cluster.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
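
As the expanded `include_defaults` description above explains, the defaults returned reflect one node's `elasticsearch.yml` and may vary between invocations. A minimal sketch, assuming an `AsyncElasticsearch` instance `es`:

    # Only explicitly defined settings are returned by default; also request defaults, flattened.
    settings = await es.cluster.get_settings(include_defaults=True, flat_settings=True)
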
@@ -433,14 +452,14 @@ class ClusterClient(NamespacedClient):
         wait_for_no_relocating_shards: t.Optional[bool] = None,
         wait_for_nodes: t.Optional[t.Union[int, str]] = None,
         wait_for_status: t.Optional[
-            t.Union[str, t.Literal["green", "red", "yellow"]]
+            t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]]
         ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get the cluster health status.
-          You can also use the API to get the health status of only specified data streams and indices.
+          <p>Get the cluster health status.</p>
+          <p>You can also use the API to get the health status of only specified data streams and indices.
           For data streams, the API retrieves the health status of the stream’s backing indices.</p>
           <p>The cluster health status is: green, yellow or red.
           On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
@@ -449,7 +468,7 @@ class ClusterClient(NamespacedClient):
           The cluster status is controlled by the worst index status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-health.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health>`_
 
         :param index: Comma-separated list of data streams, indices, and index aliases
             used to limit the request. Wildcard expressions (`*`) are supported. To target
@@ -557,7 +576,7 @@ class ClusterClient(NamespacedClient):
           Returns basic information about the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info>`_
 
         :param target: Limits the information returned to the specific target. Supports
             a comma-separated list, such as http,ingest.
@@ -606,7 +625,7 @@ class ClusterClient(NamespacedClient):
           However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-pending.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks>`_
 
         :param local: If `true`, the request retrieves information from the local node
             only. If `false`, information is retrieved from the master node.
@@ -646,6 +665,7 @@ class ClusterClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         node_ids: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         node_names: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         pretty: t.Optional[bool] = None,
@@ -671,8 +691,9 @@ class ClusterClient(NamespacedClient):
           They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/voting-config-exclusions.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions>`_
 
+        :param master_timeout: Period to wait for a connection to the master node.
         :param node_ids: A comma-separated list of the persistent ids of the nodes to
             exclude from the voting configuration. If specified, you may not also specify
             node_names.
@@ -692,6 +713,8 @@ class ClusterClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if node_ids is not None:
             __query["node_ids"] = node_ids
         if node_names is not None:
@@ -719,6 +742,7 @@ class ClusterClient(NamespacedClient):
         *,
         name: str,
         template: t.Optional[t.Mapping[str, t.Any]] = None,
+        cause: t.Optional[str] = None,
         create: t.Optional[bool] = None,
         deprecated: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
@@ -749,7 +773,7 @@ class ClusterClient(NamespacedClient):
           To be applied, a component template must be included in an index template's <code>composed_of</code> list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Name of the component template to create. Elasticsearch includes
             the following built-in component templates: `logs-mappings`; `logs-settings`;
@@ -762,6 +786,7 @@ class ClusterClient(NamespacedClient):
             update settings API.
         :param template: The template to be applied which includes mappings, settings,
             or aliases configuration.
+        :param cause: User-defined reason for creating the component template.
         :param create: If `true`, this request cannot replace or update existing component
             templates.
         :param deprecated: Marks this index template as deprecated. When creating or
@@ -786,6 +811,8 @@ class ClusterClient(NamespacedClient):
         __path = f'/_component_template/{__path_parts["name"]}'
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if cause is not None:
+            __query["cause"] = cause
         if create is not None:
             __query["create"] = create
         if error_trace is not None:
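
With the new `cause` query parameter documented above, a component template update can carry a free-form reason. A minimal sketch, assuming an `AsyncElasticsearch` instance `es`; the template name, body, and cause string are placeholders:

    await es.cluster.put_component_template(
        name="my-component-template",
        template={"settings": {"index.number_of_shards": 1}},
        cause="initial rollout",
    )
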
@@ -838,8 +865,8 @@ class ClusterClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Update the cluster settings.
-          Configure and update dynamic settings on a running cluster.
+          <p>Update the cluster settings.</p>
+          <p>Configure and update dynamic settings on a running cluster.
           You can also configure dynamic settings locally on an unstarted or shut down node in <code>elasticsearch.yml</code>.</p>
           <p>Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.
           You can also reset transient or persistent settings by assigning them a null value.</p>
@@ -854,13 +881,13 @@ class ClusterClient(NamespacedClient):
           If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings>`_
 
         :param flat_settings: Return settings in flat format (default: false)
         :param master_timeout: Explicit operation timeout for connection to master node
-        :param persistent:
+        :param persistent: The settings that persist after the cluster restarts.
         :param timeout: Explicit operation timeout
-        :param transient:
+        :param transient: The settings that do not persist after the cluster restarts.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cluster/settings"
@@ -908,12 +935,19 @@ class ClusterClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get remote cluster information.
-          Get all of the configured remote cluster information.
-          This API returns connection and endpoint information keyed by the configured remote cluster alias.</p>
+          <p>Get remote cluster information.</p>
+          <p>Get information about configured remote clusters.
+          The API returns connection and endpoint information keyed by the configured remote cluster alias.</p>
+          <blockquote>
+          <p>info
+          This API returns information that reflects current state on the local cluster.
+          The <code>connected</code> field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
+          Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.
+          To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the <a href="https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster">resolve cluster endpoint</a>.</p>
+          </blockquote>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-remote-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_remote/info"
@@ -970,7 +1004,7 @@ class ClusterClient(NamespacedClient):
           <p>Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the <code>?retry_failed</code> URI query parameter, which will attempt a single retry round for these shards.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-reroute.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute>`_
 
         :param commands: Defines the commands to perform.
         :param dry_run: If true, then the request simulates the operation. It will calculate
@@ -1075,7 +1109,7 @@ class ClusterClient(NamespacedClient):
           Instead, obtain the information you require using other more stable cluster APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-state.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state>`_
 
         :param metric: Limit the information returned to the specified metrics
         :param index: A comma-separated list of index names; use `_all` or empty string
@@ -1090,7 +1124,8 @@ class ClusterClient(NamespacedClient):
             when unavailable (missing or closed)
         :param local: Return local information, do not retrieve the state from master
             node (default: false)
-        :param master_timeout: Specify timeout for connection to master
+        :param master_timeout: Timeout for waiting for new cluster state in case it is
+            blocked
         :param wait_for_metadata_version: Wait for the metadata version to be equal or
             greater than the specified metadata version
         :param wait_for_timeout: The maximum time to wait for wait_for_metadata_version
@@ -1163,7 +1198,7 @@ class ClusterClient(NamespacedClient):
           Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats>`_
 
         :param node_id: Comma-separated list of node filters used to limit returned information.
             Defaults to all nodes in the cluster.
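
The reworked `remote_info` docstring above stresses that `connected` only reflects whether a connection is currently open, and that Elasticsearch does not reconnect spontaneously. A minimal sketch, assuming an `AsyncElasticsearch` instance `es` and a placeholder remote alias:

    remotes = await es.cluster.remote_info()
    # Keys are the configured remote cluster aliases, e.g. remotes["my_remote"]["connected"];
    # a False value may only mean no connection is open right now, not that the remote is down.
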
diff -pruN 8.17.2-2/elasticsearch/_async/client/connector.py 9.2.0-1/elasticsearch/_async/client/connector.py
--- 8.17.2-2/elasticsearch/_async/client/connector.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/connector.py	2025-10-28 16:50:53.000000000 +0000
@@ -49,7 +49,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the <code>last_seen</code> field in the connector and set it to the current timestamp.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/check-in-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in>`_
 
         :param connector_id: The unique identifier of the connector to be checked in
         """
@@ -85,6 +85,7 @@ class ConnectorClient(NamespacedClient):
         delete_sync_jobs: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        hard: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -98,11 +99,12 @@ class ConnectorClient(NamespacedClient):
           These need to be removed manually.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete>`_
 
         :param connector_id: The unique identifier of the connector to be deleted
         :param delete_sync_jobs: A flag indicating if associated sync jobs should be
-            also removed. Defaults to false.
+            also removed.
+        :param hard: A flag indicating if the connector should be hard deleted.
         """
         if connector_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'connector_id'")
@@ -115,6 +117,8 @@ class ConnectorClient(NamespacedClient):
             __query["error_trace"] = error_trace
         if filter_path is not None:
             __query["filter_path"] = filter_path
+        if hard is not None:
+            __query["hard"] = hard
         if human is not None:
             __query["human"] = human
         if pretty is not None:
@@ -138,6 +142,7 @@ class ConnectorClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        include_deleted: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -147,9 +152,11 @@ class ConnectorClient(NamespacedClient):
           <p>Get the details about a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get>`_
 
         :param connector_id: The unique identifier of the connector
+        :param include_deleted: A flag to indicate if the desired connector should be
+            fetched, even if it was soft-deleted.
         """
         if connector_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'connector_id'")
@@ -162,6 +169,8 @@ class ConnectorClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if include_deleted is not None:
+            __query["include_deleted"] = include_deleted
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -247,7 +256,7 @@ class ConnectorClient(NamespacedClient):
           This action is used for analytics and monitoring.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-last-sync-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param last_access_control_sync_error:
@@ -333,6 +342,7 @@ class ConnectorClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         from_: t.Optional[int] = None,
         human: t.Optional[bool] = None,
+        include_deleted: t.Optional[bool] = None,
         index_name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         pretty: t.Optional[bool] = None,
         query: t.Optional[str] = None,
@@ -346,11 +356,13 @@ class ConnectorClient(NamespacedClient):
           <p>Get information about all connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list>`_
 
         :param connector_name: A comma-separated list of connector names to fetch connector
             documents for
-        :param from_: Starting offset (default: 0)
+        :param from_: Starting offset
+        :param include_deleted: A flag to indicate if the desired connector should be
+            fetched, even if it was soft-deleted.
         :param index_name: A comma-separated list of connector index names to fetch connector
             documents for
         :param query: A wildcard query string that filters connectors with matching name,
@@ -372,6 +384,8 @@ class ConnectorClient(NamespacedClient):
             __query["from"] = from_
         if human is not None:
             __query["human"] = human
+        if include_deleted is not None:
+            __query["include_deleted"] = include_deleted
         if index_name is not None:
             __query["index_name"] = index_name
         if pretty is not None:
@@ -427,7 +441,7 @@ class ConnectorClient(NamespacedClient):
           Self-managed connectors (Connector clients) are self-managed on your infrastructure.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put>`_
 
         :param description:
         :param index_name:
@@ -509,7 +523,7 @@ class ConnectorClient(NamespacedClient):
           <p>Create or update a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put>`_
 
         :param connector_id: The unique identifier of the connector to be created or
             updated. ID is auto-generated if not provided.
@@ -584,7 +598,7 @@ class ConnectorClient(NamespacedClient):
           The connector service is then responsible for setting the status of connector sync jobs to cancelled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cancel-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
         """
@@ -635,7 +649,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/check-in-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
             to be checked in.
@@ -695,7 +709,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/claim-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job.
         :param worker_hostname: The host name of the current system that will run the
@@ -757,7 +771,7 @@ class ConnectorClient(NamespacedClient):
           This is a destructive action that is not recoverable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
             to be deleted
@@ -811,7 +825,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/set-connector-sync-job-error-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error>`_
 
         :param connector_sync_job_id: The unique identifier for the connector sync job.
         :param error: The error for the connector sync job error field.
@@ -865,7 +879,7 @@ class ConnectorClient(NamespacedClient):
           <p>Get a connector sync job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
         """
@@ -938,10 +952,10 @@ class ConnectorClient(NamespacedClient):
           <p>Get information about all stored connector sync jobs listed by their creation date in ascending order.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-connector-sync-jobs-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list>`_
 
         :param connector_id: A connector id to fetch connector sync jobs for
-        :param from_: Starting offset (default: 0)
+        :param from_: Starting offset
         :param job_type: A comma-separated list of job types to fetch the sync jobs for
         :param size: Specifies a max number of results to get
         :param status: A sync job status to fetch connector sync jobs for
@@ -1004,7 +1018,7 @@ class ConnectorClient(NamespacedClient):
           <p>Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post>`_
 
         :param id: The id of the associated connector
         :param job_type:
@@ -1080,7 +1094,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/set-connector-sync-job-stats-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job.
         :param deleted_document_count: The number of documents the sync job deleted.
@@ -1163,7 +1177,7 @@ class ConnectorClient(NamespacedClient):
           <p>Activates the valid draft filtering for a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-filtering-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         """
@@ -1216,7 +1230,7 @@ class ConnectorClient(NamespacedClient):
           Self-managed connectors (connector clients) do not use this field.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-api-key-id-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param api_key_id:
@@ -1275,7 +1289,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the configuration field in the connector document.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-configuration-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param configuration:
@@ -1335,7 +1349,7 @@ class ConnectorClient(NamespacedClient):
           Otherwise, if the error is reset to null, the connector status is updated to connected.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-error-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param error:
@@ -1403,7 +1417,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-features-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features>`_
 
         :param connector_id: The unique identifier of the connector to be updated.
         :param features:
@@ -1464,7 +1478,7 @@ class ConnectorClient(NamespacedClient):
           The filtering property is used to configure sync rules (both basic and advanced) for a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-filtering-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param advanced_snippet:
@@ -1525,7 +1539,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the draft filtering validation info for a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-filtering-validation-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param validation:
@@ -1582,7 +1596,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the <code>index_name</code> field of a connector, specifying the index where the data ingested by the connector is stored.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-index-name-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param index_name:
@@ -1639,7 +1653,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector name and description.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-name-description-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param description:
@@ -1696,7 +1710,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector is_native flag.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-native-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param is_native:
@@ -1753,7 +1767,7 @@ class ConnectorClient(NamespacedClient):
           <p>When you create a new connector, the configuration of an ingest pipeline is populated with default settings.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param pipeline:
@@ -1809,7 +1823,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector scheduling.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-scheduling-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param scheduling:
@@ -1865,7 +1879,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector service type.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-service-type-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param service_type:
@@ -1928,7 +1942,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param status:
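
Several ConnectorClient methods above gain soft-delete-related flags: `hard` on delete and `include_deleted` on get/list. A minimal sketch, assuming an `AsyncElasticsearch` instance `es` and a placeholder connector id:

    # Deletion is a soft delete unless hard=True; associated sync jobs can be removed with it.
    await es.connector.delete(connector_id="my-connector", delete_sync_jobs=True, hard=False)
    # A soft-deleted connector can still be retrieved when include_deleted=True.
    doc = await es.connector.get(connector_id="my-connector", include_deleted=True)
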
diff -pruN 8.17.2-2/elasticsearch/_async/client/dangling_indices.py 9.2.0-1/elasticsearch/_async/client/dangling_indices.py
--- 8.17.2-2/elasticsearch/_async/client/dangling_indices.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/dangling_indices.py	2025-10-28 16:50:53.000000000 +0000
@@ -46,7 +46,7 @@ class DanglingIndicesClient(NamespacedCl
           For example, this can happen if you delete more than <code>cluster.indices.tombstones.size</code> indices while an Elasticsearch node is offline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index>`_
 
         :param index_uuid: The UUID of the index to delete. Use the get dangling indices
             API to find the UUID.
@@ -107,7 +107,7 @@ class DanglingIndicesClient(NamespacedCl
           For example, this can happen if you delete more than <code>cluster.indices.tombstones.size</code> indices while an Elasticsearch node is offline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index>`_
 
         :param index_uuid: The UUID of the index to import. Use the get dangling indices
             API to locate the UUID.
@@ -168,7 +168,7 @@ class DanglingIndicesClient(NamespacedCl
           <p>Use this API to list dangling indices, which you can then import or delete.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-indices-list.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_dangling"
diff -pruN 8.17.2-2/elasticsearch/_async/client/enrich.py 9.2.0-1/elasticsearch/_async/client/enrich.py
--- 8.17.2-2/elasticsearch/_async/client/enrich.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/enrich.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,6 +33,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -42,9 +43,10 @@ class EnrichClient(NamespacedClient):
           Deletes an existing enrich policy and its enrich index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy>`_
 
         :param name: Enrich policy to delete.
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -57,6 +59,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -77,6 +81,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         wait_for_completion: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -87,9 +92,10 @@ class EnrichClient(NamespacedClient):
           Create the enrich index for an existing enrich policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/execute-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy>`_
 
         :param name: Enrich policy to execute.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param wait_for_completion: If `true`, the request blocks other enrich policy
             execution requests until complete.
         """
@@ -104,6 +110,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if wait_for_completion is not None:
@@ -126,6 +134,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -135,10 +144,11 @@ class EnrichClient(NamespacedClient):
           Returns information about an enrich policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy>`_
 
         :param name: Comma-separated list of enrich policy names used to limit the request.
             To return information for all enrich policies, omit this parameter.
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -154,6 +164,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -177,6 +189,7 @@ class EnrichClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         geo_match: t.Optional[t.Mapping[str, t.Any]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         match: t.Optional[t.Mapping[str, t.Any]] = None,
         pretty: t.Optional[bool] = None,
         range: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -189,11 +202,12 @@ class EnrichClient(NamespacedClient):
           Creates an enrich policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy>`_
 
         :param name: Name of the enrich policy to create or update.
         :param geo_match: Matches enrich data to incoming documents based on a `geo_shape`
             query.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param match: Matches enrich data to incoming documents based on a `term` query.
         :param range: Matches a number, date, or IP address in incoming documents to
             a range in the enrich index based on a `term` query.
@@ -210,6 +224,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -237,6 +253,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -246,7 +263,9 @@ class EnrichClient(NamespacedClient):
           Returns enrich coordinator statistics and information about enrich policies that are currently executing.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/enrich-stats-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_enrich/_stats"
@@ -257,6 +276,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
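
For context on the `master_timeout` additions above, here is a minimal sketch of creating and then executing an enrich policy with the async client. The endpoint URL, policy name, source index, and field names are placeholders, and the match-policy body follows the standard enrich policy schema rather than anything shown in this diff.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        # Define a match-type enrich policy (body schema assumed from the enrich docs).
        await client.enrich.put_policy(
            name="users-policy",
            match={
                "indices": "users",
                "match_field": "email",
                "enrich_fields": ["first_name", "last_name"],
            },
            master_timeout="30s",  # newly exposed in 9.x per the hunk above
        )
        # Build the enrich index, again bounding the wait for the master node.
        await client.enrich.execute_policy(
            name="users-policy",
            wait_for_completion=True,
            master_timeout="30s",
        )
    finally:
        await client.close()


asyncio.run(main())
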
diff -pruN 8.17.2-2/elasticsearch/_async/client/eql.py 9.2.0-1/elasticsearch/_async/client/eql.py
--- 8.17.2-2/elasticsearch/_async/client/eql.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/eql.py	2025-10-28 16:50:53.000000000 +0000
@@ -43,7 +43,7 @@ class EqlClient(NamespacedClient):
           The API also deletes results for the search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/eql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete>`_
 
         :param id: Identifier for the search to delete. A search ID is provided in the
             EQL search API's response for an async search. A search ID is also provided
@@ -93,7 +93,7 @@ class EqlClient(NamespacedClient):
           Get the current status and available results for an async EQL search or a stored synchronous EQL search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-eql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get>`_
 
         :param id: Identifier for the search.
         :param keep_alive: Period for which the search and its results are stored on
@@ -147,7 +147,7 @@ class EqlClient(NamespacedClient):
           Get the current status for an async EQL search or a stored synchronous EQL search without returning results.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-eql-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status>`_
 
         :param id: Identifier for the search.
         """
@@ -177,6 +177,8 @@ class EqlClient(NamespacedClient):
     @_rewrite_parameters(
         body_fields=(
             "query",
+            "allow_partial_search_results",
+            "allow_partial_sequence_results",
             "case_sensitive",
             "event_category_field",
             "fetch_size",
@@ -184,6 +186,7 @@ class EqlClient(NamespacedClient):
             "filter",
             "keep_alive",
             "keep_on_completion",
+            "max_samples_per_key",
             "result_position",
             "runtime_mappings",
             "size",
@@ -198,7 +201,10 @@ class EqlClient(NamespacedClient):
         index: t.Union[str, t.Sequence[str]],
         query: t.Optional[str] = None,
         allow_no_indices: t.Optional[bool] = None,
+        allow_partial_search_results: t.Optional[bool] = None,
+        allow_partial_sequence_results: t.Optional[bool] = None,
         case_sensitive: t.Optional[bool] = None,
+        ccs_minimize_roundtrips: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         event_category_field: t.Optional[str] = None,
         expand_wildcards: t.Optional[
@@ -221,7 +227,9 @@ class EqlClient(NamespacedClient):
         ignore_unavailable: t.Optional[bool] = None,
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         keep_on_completion: t.Optional[bool] = None,
+        max_samples_per_key: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         size: t.Optional[int] = None,
@@ -240,15 +248,29 @@ class EqlClient(NamespacedClient):
           EQL assumes each document in a data stream or index corresponds to an event.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/eql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search>`_
 
         :param index: The name of the index to scope the operation
         :param query: EQL query you wish to run.
-        :param allow_no_indices:
+        :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
+            into no concrete indices. (This includes the `_all` string or when no indices
+            have been specified.)
+        :param allow_partial_search_results: Allow the query to run even if there are
+            shard failures. If true, the query keeps running and returns results based
+            on the available shards. For sequences, the behavior can be further refined
+            using `allow_partial_sequence_results`.
+        :param allow_partial_sequence_results: This flag applies only to sequences and
+            takes effect only if `allow_partial_search_results=true`. If true, the sequence
+            query returns results based on the available shards, ignoring the others.
+            If false, the sequence query returns successfully but always has empty
+            results.
         :param case_sensitive:
+        :param ccs_minimize_roundtrips: Indicates whether network round-trips should
+            be minimized as part of cross-cluster search request execution.
         :param event_category_field: Field containing the event classification, such
             as process, file, or network.
-        :param expand_wildcards:
+        :param expand_wildcards: Whether to expand wildcard expressions to concrete indices
+            that are open, closed, or both.
         :param fetch_size: Maximum number of events to search at a time for sequence
             queries.
         :param fields: Array of wildcard (*) patterns. The response returns values for
@@ -259,6 +281,15 @@ class EqlClient(NamespacedClient):
             in the response.
         :param keep_alive:
         :param keep_on_completion:
+        :param max_samples_per_key: By default, the response of a sample query contains
+            up to `10` samples, with one sample per unique set of join keys. Use the
+            `size` parameter to get a smaller or larger set of samples. To retrieve more
+            than one sample per set of join keys, use the `max_samples_per_key` parameter.
+            Pipes are not supported for sample queries.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param result_position:
         :param runtime_mappings:
         :param size: For basic queries, the maximum number of matching events to return.
@@ -278,6 +309,8 @@ class EqlClient(NamespacedClient):
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if allow_no_indices is not None:
             __query["allow_no_indices"] = allow_no_indices
+        if ccs_minimize_roundtrips is not None:
+            __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if expand_wildcards is not None:
@@ -290,9 +323,17 @@ class EqlClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if not __body:
             if query is not None:
                 __body["query"] = query
+            if allow_partial_search_results is not None:
+                __body["allow_partial_search_results"] = allow_partial_search_results
+            if allow_partial_sequence_results is not None:
+                __body["allow_partial_sequence_results"] = (
+                    allow_partial_sequence_results
+                )
             if case_sensitive is not None:
                 __body["case_sensitive"] = case_sensitive
             if event_category_field is not None:
@@ -307,6 +348,8 @@ class EqlClient(NamespacedClient):
                 __body["keep_alive"] = keep_alive
             if keep_on_completion is not None:
                 __body["keep_on_completion"] = keep_on_completion
+            if max_samples_per_key is not None:
+                __body["max_samples_per_key"] = max_samples_per_key
             if result_position is not None:
                 __body["result_position"] = result_position
             if runtime_mappings is not None:
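
A sketch of how the new EQL search flags might be passed from the async client; the index pattern and the sequence query are illustrative only, and the endpoint is a placeholder.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        resp = await client.eql.search(
            index="my-logs-*",  # illustrative index pattern
            query="sequence [process where true] [network where true]",
            allow_partial_search_results=True,    # keep running despite shard failures
            allow_partial_sequence_results=True,  # only meaningful for sequence queries
            fetch_size=1000,
        )
        print(resp["hits"])
    finally:
        await client.close()


asyncio.run(main())
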
diff -pruN 8.17.2-2/elasticsearch/_async/client/esql.py 9.2.0-1/elasticsearch/_async/client/esql.py
--- 8.17.2-2/elasticsearch/_async/client/esql.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/esql.py	2025-10-28 16:50:53.000000000 +0000
@@ -20,7 +20,16 @@ import typing as t
 from elastic_transport import ObjectApiResponse
 
 from ._base import NamespacedClient
-from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters
+from .utils import (
+    SKIP_IN_PATH,
+    Stability,
+    _quote,
+    _rewrite_parameters,
+    _stability_warning,
+)
+
+if t.TYPE_CHECKING:
+    from elasticsearch.esql import ESQLBase
 
 
 class EsqlClient(NamespacedClient):
@@ -31,17 +40,22 @@ class EsqlClient(NamespacedClient):
             "columnar",
             "filter",
             "include_ccs_metadata",
+            "include_execution_metadata",
+            "keep_alive",
+            "keep_on_completion",
             "locale",
             "params",
             "profile",
             "tables",
+            "wait_for_completion_timeout",
         ),
         ignore_deprecated_options={"params"},
     )
     async def async_query(
         self,
         *,
-        query: t.Optional[str] = None,
+        query: t.Optional[t.Union[str, "ESQLBase"]] = None,
+        allow_partial_results: t.Optional[bool] = None,
         columnar: t.Optional[bool] = None,
         delimiter: t.Optional[str] = None,
         drop_null_columns: t.Optional[bool] = None,
@@ -58,12 +72,11 @@ class EsqlClient(NamespacedClient):
         ] = None,
         human: t.Optional[bool] = None,
         include_ccs_metadata: t.Optional[bool] = None,
+        include_execution_metadata: t.Optional[bool] = None,
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         keep_on_completion: t.Optional[bool] = None,
         locale: t.Optional[str] = None,
-        params: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
-        ] = None,
+        params: t.Optional[t.Sequence[t.Union[None, bool, float, int, str]]] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
         tables: t.Optional[
@@ -82,10 +95,15 @@ class EsqlClient(NamespacedClient):
           <p>The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-async-query-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query>`_
 
         :param query: The ES|QL query API accepts an ES|QL query string in the query
             parameter, runs it, and returns the results.
+        :param allow_partial_results: If `true`, partial results will be returned if
+            there are shard failures, but the query can continue to execute on other
+            clusters and shards. If `false`, the query will fail if there are any failures.
+            To override the default behavior, you can set the `esql.query.allow_partial_results`
+            cluster setting to `false`.
         :param columnar: By default, ES|QL returns results as rows. For example, FROM
             returns each individual document as one row. For the JSON, YAML, CBOR and
             smile formats, ES|QL can return the results in a columnar fashion where one
@@ -98,8 +116,17 @@ class EsqlClient(NamespacedClient):
             which has the name of all the columns.
         :param filter: Specify a Query DSL query in the filter parameter to filter the
             set of documents that an ES|QL query runs on.
-        :param format: A short version of the Accept header, for example `json` or `yaml`.
-        :param include_ccs_metadata: When set to `true` and performing a cross-cluster
+        :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+            `tsv`, and `txt` formats will return results in a tabular format, excluding
+            other metadata fields from the response. For async requests, nothing will
+            be returned if the async query doesn't finish within the timeout. The query
+            ID and running status are available in the `X-Elasticsearch-Async-Id` and
+            `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
+        :param include_ccs_metadata: When set to `true` and performing a cross-cluster/cross-project
+            query, the response will include an extra `_clusters` object with information
+            about the clusters that participated in the search along with info such as
+            shards count.
+        :param include_execution_metadata: When set to `true` and performing a cross-cluster/cross-project
             query, the response will include an extra `_clusters` object with information
             about the clusters that participated in the search along with info such as
             shards count.
@@ -134,6 +161,8 @@ class EsqlClient(NamespacedClient):
         __path = "/_query/async"
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if allow_partial_results is not None:
+            __query["allow_partial_results"] = allow_partial_results
         if delimiter is not None:
             __query["delimiter"] = delimiter
         if drop_null_columns is not None:
@@ -146,23 +175,23 @@ class EsqlClient(NamespacedClient):
             __query["format"] = format
         if human is not None:
             __query["human"] = human
-        if keep_alive is not None:
-            __query["keep_alive"] = keep_alive
-        if keep_on_completion is not None:
-            __query["keep_on_completion"] = keep_on_completion
         if pretty is not None:
             __query["pretty"] = pretty
-        if wait_for_completion_timeout is not None:
-            __query["wait_for_completion_timeout"] = wait_for_completion_timeout
         if not __body:
             if query is not None:
-                __body["query"] = query
+                __body["query"] = str(query)
             if columnar is not None:
                 __body["columnar"] = columnar
             if filter is not None:
                 __body["filter"] = filter
             if include_ccs_metadata is not None:
                 __body["include_ccs_metadata"] = include_ccs_metadata
+            if include_execution_metadata is not None:
+                __body["include_execution_metadata"] = include_execution_metadata
+            if keep_alive is not None:
+                __body["keep_alive"] = keep_alive
+            if keep_on_completion is not None:
+                __body["keep_on_completion"] = keep_on_completion
             if locale is not None:
                 __body["locale"] = locale
             if params is not None:
@@ -171,6 +200,8 @@ class EsqlClient(NamespacedClient):
                 __body["profile"] = profile
             if tables is not None:
                 __body["tables"] = tables
+            if wait_for_completion_timeout is not None:
+                __body["wait_for_completion_timeout"] = wait_for_completion_timeout
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -205,7 +236,7 @@ class EsqlClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-async-query-delete-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete>`_
 
         :param id: The unique identifier of the query. A query ID is provided in the
             ES|QL async query API response for a query that does not complete in the
@@ -243,6 +274,14 @@ class EsqlClient(NamespacedClient):
         drop_null_columns: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        format: t.Optional[
+            t.Union[
+                str,
+                t.Literal[
+                    "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml"
+                ],
+            ]
+        ] = None,
         human: t.Optional[bool] = None,
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
@@ -258,7 +297,7 @@ class EsqlClient(NamespacedClient):
           If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-async-query-get-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get>`_
 
         :param id: The unique identifier of the query. A query ID is provided in the
             ES|QL async query API response for a query that does not complete in the
@@ -268,6 +307,7 @@ class EsqlClient(NamespacedClient):
             will be removed from the `columns` and `values` portion of the results. If
             `true`, the response will include an extra section under the name `all_columns`
             which has the name of all the columns.
+        :param format: A short version of the Accept header, for example `json` or `yaml`.
         :param keep_alive: The period for which the query and its results are stored
             in the cluster. When this period expires, the query and its results are deleted,
             even if the query is still ongoing.
@@ -288,6 +328,8 @@ class EsqlClient(NamespacedClient):
             __query["error_trace"] = error_trace
         if filter_path is not None:
             __query["filter_path"] = filter_path
+        if format is not None:
+            __query["format"] = format
         if human is not None:
             __query["human"] = human
         if keep_alive is not None:
@@ -306,12 +348,153 @@ class EsqlClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters()
+    async def async_query_stop(
+        self,
+        *,
+        id: str,
+        drop_null_columns: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Stop async ES|QL query.</p>
+          <p>This API interrupts the query execution and returns the results so far.
+          If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop>`_
+
+        :param id: The unique identifier of the query. A query ID is provided in the
+            ES|QL async query API response for a query that does not complete in the
+            designated time. A query ID is also provided when the request was submitted
+            with the `keep_on_completion` parameter set to `true`.
+        :param drop_null_columns: Indicates whether columns that are entirely `null`
+            will be removed from the `columns` and `values` portion of the results. If
+            `true`, the response will include an extra section under the name `all_columns`
+            which has the name of all the columns.
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'id'")
+        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
+        __path = f'/_query/async/{__path_parts["id"]}/stop'
+        __query: t.Dict[str, t.Any] = {}
+        if drop_null_columns is not None:
+            __query["drop_null_columns"] = drop_null_columns
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="esql.async_query_stop",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def get_query(
+        self,
+        *,
+        id: str,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get information about a specific running ES|QL query.
+          Returns an object with extended information about a running ES|QL query.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-get-query>`_
+
+        :param id: The query ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'id'")
+        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
+        __path = f'/_query/queries/{__path_parts["id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="esql.get_query",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def list_queries(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get information about running ES|QL queries.
+          Returns an object containing IDs and other information about the running ES|QL queries.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-list-queries>`_
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_query/queries"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="esql.list_queries",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         body_fields=(
             "query",
             "columnar",
             "filter",
             "include_ccs_metadata",
+            "include_execution_metadata",
             "locale",
             "params",
             "profile",
@@ -322,7 +505,8 @@ class EsqlClient(NamespacedClient):
     async def query(
         self,
         *,
-        query: t.Optional[str] = None,
+        query: t.Optional[t.Union[str, "ESQLBase"]] = None,
+        allow_partial_results: t.Optional[bool] = None,
         columnar: t.Optional[bool] = None,
         delimiter: t.Optional[str] = None,
         drop_null_columns: t.Optional[bool] = None,
@@ -339,9 +523,15 @@ class EsqlClient(NamespacedClient):
         ] = None,
         human: t.Optional[bool] = None,
         include_ccs_metadata: t.Optional[bool] = None,
+        include_execution_metadata: t.Optional[bool] = None,
         locale: t.Optional[str] = None,
         params: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[
+                t.Union[
+                    t.Sequence[t.Union[None, bool, float, int, str]],
+                    t.Union[None, bool, float, int, str],
+                ]
+            ]
         ] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
@@ -357,10 +547,15 @@ class EsqlClient(NamespacedClient):
           Get search results for an ES|QL (Elasticsearch query language) query.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-rest.html>`_
+        `<https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest>`_
 
         :param query: The ES|QL query API accepts an ES|QL query string in the query
             parameter, runs it, and returns the results.
+        :param allow_partial_results: If `true`, partial results will be returned if
+            there are shard failures, but the query can continue to execute on other
+            clusters and shards. If `false`, the query will fail if there are any failures.
+            To override the default behavior, you can set the `esql.query.allow_partial_results`
+            cluster setting to `false`.
         :param columnar: By default, ES|QL returns results as rows. For example, FROM
             returns each individual document as one row. For the JSON, YAML, CBOR and
             smile formats, ES|QL can return the results in a columnar fashion where one
@@ -373,8 +568,14 @@ class EsqlClient(NamespacedClient):
             `all_columns` which has the name of all columns.
         :param filter: Specify a Query DSL query in the filter parameter to filter the
             set of documents that an ES|QL query runs on.
-        :param format: A short version of the Accept header, e.g. json, yaml.
-        :param include_ccs_metadata: When set to `true` and performing a cross-cluster
+        :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+            `tsv`, and `txt` formats will return results in a tabular format, excluding
+            other metadata fields from the response.
+        :param include_ccs_metadata: When set to `true` and performing a cross-cluster/cross-project
+            query, the response will include an extra `_clusters` object with information
+            about the clusters that participated in the search along with info such as
+            shards count.
+        :param include_execution_metadata: When set to `true` and performing a cross-cluster/cross-project
             query, the response will include an extra `_clusters` object with information
             about the clusters that participated in the search along with info such as
             shards count.
@@ -395,6 +596,8 @@ class EsqlClient(NamespacedClient):
         __path = "/_query"
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if allow_partial_results is not None:
+            __query["allow_partial_results"] = allow_partial_results
         if delimiter is not None:
             __query["delimiter"] = delimiter
         if drop_null_columns is not None:
@@ -411,13 +614,15 @@ class EsqlClient(NamespacedClient):
             __query["pretty"] = pretty
         if not __body:
             if query is not None:
-                __body["query"] = query
+                __body["query"] = str(query)
             if columnar is not None:
                 __body["columnar"] = columnar
             if filter is not None:
                 __body["filter"] = filter
             if include_ccs_metadata is not None:
                 __body["include_ccs_metadata"] = include_ccs_metadata
+            if include_execution_metadata is not None:
+                __body["include_execution_metadata"] = include_execution_metadata
             if locale is not None:
                 __body["locale"] = locale
             if params is not None:
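
Putting the esql.py changes together, a hedged sketch of the async ES|QL lifecycle: submit a query, stop it early, and list running queries. The query text, timeout value, and response-shape assumptions (the `id` key) come from the ES|QL async query documentation rather than from this diff.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        submitted = await client.esql.async_query(
            query="FROM my-index | LIMIT 10",  # illustrative ES|QL query
            keep_on_completion=True,           # store results so they can be fetched later
            wait_for_completion_timeout="0s",  # assumed acceptable: return an ID right away
            allow_partial_results=True,        # new in 9.x per the hunk above
        )
        query_id = submitted.body.get("id")  # present while the query is stored/running
        if query_id:
            # Interrupt the query and collect whatever results are available so far.
            print(await client.esql.async_query_stop(id=query_id))
        # Experimental in 9.x: inspect currently running ES|QL queries.
        print(await client.esql.list_queries())
    finally:
        await client.close()


asyncio.run(main())
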
diff -pruN 8.17.2-2/elasticsearch/_async/client/features.py 9.2.0-1/elasticsearch/_async/client/features.py
--- 8.17.2-2/elasticsearch/_async/client/features.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/features.py	2025-10-28 16:50:53.000000000 +0000
@@ -32,6 +32,7 @@ class FeaturesClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -47,7 +48,9 @@ class FeaturesClient(NamespacedClient):
           In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-features-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_features"
@@ -58,6 +61,8 @@ class FeaturesClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -78,6 +83,7 @@ class FeaturesClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -96,7 +102,9 @@ class FeaturesClient(NamespacedClient):
           <p>IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_features/_reset"
@@ -107,6 +115,8 @@ class FeaturesClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
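
A brief sketch of the feature-state API with the newly added `master_timeout`; the response shape (a `features` list whose entries carry a `name`) is assumed from the get features API rather than shown in this diff.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        # List feature states eligible for snapshotting, bounding the master wait.
        features = await client.features.get_features(master_timeout="30s")
        print([f["name"] for f in features["features"]])  # assumed response shape
    finally:
        await client.close()


asyncio.run(main())
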
diff -pruN 8.17.2-2/elasticsearch/_async/client/fleet.py 9.2.0-1/elasticsearch/_async/client/fleet.py
--- 8.17.2-2/elasticsearch/_async/client/fleet.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/fleet.py	2025-10-28 16:50:53.000000000 +0000
@@ -48,10 +48,12 @@ class FleetClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project.</p>
+          <p>Get global checkpoints.</p>
+          <p>Get the current global checkpoints for an index.
+          This API is designed for internal use by the Fleet server project.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-global-checkpoints.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet>`_
 
         :param index: A single index or index alias that resolves to a single index.
         :param checkpoints: A comma separated list of previous global checkpoints. When
@@ -136,11 +138,14 @@ class FleetClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Executes several <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html">fleet searches</a> with a single API request.
-          The API follows the same structure as the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">multi search</a> API. However, similar to the fleet search API, it
-          supports the wait_for_checkpoints parameter.</p>
+          <p>Run multiple Fleet searches.
+          Run several Fleet searches with a single API request.
+          The API follows the same structure as the multi search API.
+          However, similar to the Fleet search API, it supports the <code>wait_for_checkpoints</code> parameter.</p>
 
 
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch>`_
+
         :param searches:
         :param index: A single target to search. If the target is an index alias, it
             must resolve to a single index.
@@ -150,9 +155,9 @@ class FleetClient(NamespacedClient):
             example, a request targeting foo*,bar* returns an error if an index starts
             with foo but no index starts with bar.
         :param allow_partial_search_results: If true, returns partial results if there
-            are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
-            If false, returns an error with no partial results. Defaults to the configured
-            cluster setting `search.default_allow_partial_results` which is true by default.
+            are shard request timeouts or shard failures. If false, returns an error
+            with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+            which is true by default.
         :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating
             node and remote clusters are minimized for cross-cluster search requests.
         :param expand_wildcards: Type of index that wildcard expressions can match. If
@@ -326,7 +331,6 @@ class FleetClient(NamespacedClient):
         indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None,
         lenient: t.Optional[bool] = None,
         max_concurrent_shard_requests: t.Optional[int] = None,
-        min_compatible_shard_node: t.Optional[str] = None,
         min_score: t.Optional[float] = None,
         pit: t.Optional[t.Mapping[str, t.Any]] = None,
         post_filter: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -346,7 +350,7 @@ class FleetClient(NamespacedClient):
         script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -384,9 +388,12 @@ class FleetClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>The purpose of the fleet search api is to provide a search api where the search will only be executed
-          after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.</p>
+          <p>Run a Fleet search.
+          The purpose of the Fleet search API is to provide an API where the search will be run only
+          after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.</p>
+
 
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search>`_
 
         :param index: A single target to search. If the target is an index alias, it
             must resolve to a single index.
@@ -394,9 +401,9 @@ class FleetClient(NamespacedClient):
         :param aggs:
         :param allow_no_indices:
         :param allow_partial_search_results: If true, returns partial results if there
-            are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
-            If false, returns an error with no partial results. Defaults to the configured
-            cluster setting `search.default_allow_partial_results` which is true by default.
+            are shard request timeouts or shard failures. If false, returns an error
+            with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+            which is true by default.
         :param analyze_wildcard:
         :param analyzer:
         :param batched_reduce_size:
@@ -422,9 +429,8 @@ class FleetClient(NamespacedClient):
         :param indices_boost: Boosts the _score of documents from specified indices.
         :param lenient:
         :param max_concurrent_shard_requests:
-        :param min_compatible_shard_node:
         :param min_score: Minimum _score for matching documents. Documents with a lower
-            _score are not included in the search results.
+            _score are not included in search results and results collected by aggregations.
         :param pit: Limits the search to a point in time (PIT). If you provide a PIT,
             you cannot specify an <index> in the request path.
         :param post_filter:
@@ -537,8 +543,6 @@ class FleetClient(NamespacedClient):
             __query["lenient"] = lenient
         if max_concurrent_shard_requests is not None:
             __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
-        if min_compatible_shard_node is not None:
-            __query["min_compatible_shard_node"] = min_compatible_shard_node
         if pre_filter_shard_size is not None:
             __query["pre_filter_shard_size"] = pre_filter_shard_size
         if preference is not None:
@@ -638,11 +642,7 @@ class FleetClient(NamespacedClient):
                 __body["track_total_hits"] = track_total_hits
             if version is not None:
                 __body["version"] = version
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
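
A sketch of a Fleet search that waits for a global checkpoint, as the docstrings above describe. The index name, checkpoint value, and query are illustrative; `wait_for_checkpoints` is an existing parameter of this method that the hunks shown simply do not touch.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        resp = await client.fleet.search(
            index=".fleet-actions-results",      # must resolve to a single index
            query={"match_all": {}},
            wait_for_checkpoints=[42],           # illustrative global checkpoint
            allow_partial_search_results=False,  # fail rather than return partial hits
        )
        print(resp["hits"]["total"])
    finally:
        await client.close()


asyncio.run(main())
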
diff -pruN 8.17.2-2/elasticsearch/_async/client/graph.py 9.2.0-1/elasticsearch/_async/client/graph.py
--- 8.17.2-2/elasticsearch/_async/client/graph.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/graph.py	2025-10-28 16:50:53.000000000 +0000
@@ -55,7 +55,7 @@ class GraphClient(NamespacedClient):
           You can exclude vertices that have already been returned.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/graph-explore-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph>`_
 
         :param index: Name of the index.
         :param connections: Specifies one or more fields from which you want to extract terms
@@ -97,11 +97,7 @@ class GraphClient(NamespacedClient):
                 __body["query"] = query
             if vertices is not None:
                 __body["vertices"] = vertices
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
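
The graph.py change is limited to the doc URL and always sending a JSON body, but a short explore sketch may help orient the reader; the index, seed query, and field names are assumptions, not taken from this diff.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        resp = await client.graph.explore(
            index="clicklogs",                                   # illustrative index
            query={"match": {"query.raw": "midi"}},              # seed documents
            vertices=[{"field": "product"}],                     # terms to extract
            connections={"vertices": [{"field": "query.raw"}]},  # follow-on hop
        )
        print(len(resp["vertices"]), "vertices found")
    finally:
        await client.close()


asyncio.run(main())
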
diff -pruN 8.17.2-2/elasticsearch/_async/client/ilm.py 9.2.0-1/elasticsearch/_async/client/ilm.py
--- 8.17.2-2/elasticsearch/_async/client/ilm.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/ilm.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class IlmClient(NamespacedClient):
           You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-delete-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle>`_
 
         :param name: Identifier for the policy.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -92,7 +92,6 @@ class IlmClient(NamespacedClient):
         only_errors: t.Optional[bool] = None,
         only_managed: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -103,7 +102,7 @@ class IlmClient(NamespacedClient):
           <p>The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-explain-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases to target.
             Supports wildcards (`*`). To target all data streams and indices, use `*`
@@ -116,8 +115,6 @@ class IlmClient(NamespacedClient):
             while executing the policy, or attempting to use a policy that does not exist.
         :param only_managed: Filters the returned indices to only indices that are managed
             by ILM.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -138,8 +135,6 @@ class IlmClient(NamespacedClient):
             __query["only_managed"] = only_managed
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -168,7 +163,7 @@ class IlmClient(NamespacedClient):
           <p>Get lifecycle policies.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-get-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle>`_
 
         :param name: Identifier for the policy.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -219,11 +214,11 @@ class IlmClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get the ILM status.
-          Get the current index lifecycle management status.</p>
+          <p>Get the ILM status.</p>
+          <p>Get the current index lifecycle management status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-get-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ilm/status"
@@ -257,6 +252,7 @@ class IlmClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         legacy_template_to_delete: t.Optional[str] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         node_attribute: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
@@ -279,12 +275,16 @@ class IlmClient(NamespacedClient):
           Use the stop ILM and get ILM status APIs to wait until the reported operation mode is <code>STOPPED</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-migrate-to-data-tiers.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers>`_
 
         :param dry_run: If true, simulates the migration from node attributes based allocation
             filters to data tiers, but does not perform the migration. This provides
             a way to retrieve the indices and ILM policies that need to be migrated.
         :param legacy_template_to_delete:
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error. It can also be set to `-1` to indicate that the request
+            should never timeout.
         :param node_attribute:
         """
         __path_parts: t.Dict[str, str] = {}
@@ -299,6 +299,8 @@ class IlmClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -352,7 +354,7 @@ class IlmClient(NamespacedClient):
           An index cannot move to a step that is not part of its policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-move-to-step.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step>`_
 
         :param index: The name of the index whose lifecycle step is to change
         :param current_step: The step that the index is expected to be in.
@@ -381,11 +383,7 @@ class IlmClient(NamespacedClient):
                 __body["current_step"] = current_step
             if next_step is not None:
                 __body["next_step"] = next_step
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -420,7 +418,7 @@ class IlmClient(NamespacedClient):
           <p>NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-put-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle>`_
 
         :param name: Identifier for the policy.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -451,11 +449,7 @@ class IlmClient(NamespacedClient):
         if not __body:
             if policy is not None:
                 __body["policy"] = policy
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -484,7 +478,7 @@ class IlmClient(NamespacedClient):
           It also stops managing the indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-remove-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy>`_
 
         :param index: The name of the index to remove policy on
         """
@@ -530,7 +524,7 @@ class IlmClient(NamespacedClient):
           Use the explain lifecycle state API to determine whether an index is in the ERROR step.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-retry-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry>`_
 
         :param index: The name of the indices (comma-separated) whose failed lifecycle
             step is to be retried
@@ -578,10 +572,13 @@ class IlmClient(NamespacedClient):
           Restarting ILM is necessary only when it has been stopped using the stop ILM API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-start.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start>`_
 
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ilm/start"
@@ -629,10 +626,13 @@ class IlmClient(NamespacedClient):
           Use the get ILM status API to check whether ILM is running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-stop.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop>`_
 
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ilm/stop"
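
A minimal sketch of the migrate-to-data-tiers call with the `master_timeout` parameter added above; `dry_run` previews the migration, and the node attribute name is an assumption.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint/auth
    try:
        # Preview which indices and ILM policies would be migrated to data tiers.
        plan = await client.ilm.migrate_to_data_tiers(
            dry_run=True,
            node_attribute="data",  # assumed custom allocation attribute name
            master_timeout="30s",   # newly exposed in 9.x per the hunk above
        )
        print(plan)
    finally:
        await client.close()


asyncio.run(main())
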
diff -pruN 8.17.2-2/elasticsearch/_async/client/indices.py 9.2.0-1/elasticsearch/_async/client/indices.py
--- 8.17.2-2/elasticsearch/_async/client/indices.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/indices.py	2025-10-28 16:50:53.000000000 +0000
@@ -57,23 +57,40 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Add an index block.
-          Limits the operations allowed on an index by blocking specific operation types.</p>
+          <p>Add an index block.</p>
+          <p>Add an index block to an index.
+          Index blocks limit the operations allowed on an index by blocking specific operation types.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/index-modules-blocks.html>`_
-
-        :param index: A comma separated list of indices to add a block to
-        :param block: The block to add (one of read, write, read_only or metadata)
-        :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
-            into no concrete indices. (This includes `_all` string or when no indices
-            have been specified)
-        :param expand_wildcards: Whether to expand wildcard expression to concrete indices
-            that are open, closed or both.
-        :param ignore_unavailable: Whether specified concrete indices should be ignored
-            when unavailable (missing or closed)
-        :param master_timeout: Specify timeout for connection to master
-        :param timeout: Explicit operation timeout
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block>`_
+
+        :param index: A comma-separated list or wildcard expression of index names used
+            to limit the request. By default, you must explicitly name the indices you
+            are adding blocks to. To allow the adding of blocks to indices with `_all`,
+            `*`, or other wildcard expressions, change the `action.destructive_requires_name`
+            setting to `false`. You can update this setting in the `elasticsearch.yml`
+            file or by using the cluster update settings API.
+        :param block: The block type to add to the index.
+        :param allow_no_indices: If `false`, the request returns an error if any wildcard
+            expression, index alias, or `_all` value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices. For
+            example, a request targeting `foo*,bar*` returns an error if an index starts
+            with `foo` but no index starts with `bar`.
+        :param expand_wildcards: The type of index that wildcard patterns can match.
+            If the request can target data streams, this argument determines whether
+            wildcard expressions match hidden data streams. It supports comma-separated
+            values, such as `open,hidden`.
+        :param ignore_unavailable: If `false`, the request returns an error if it targets
+            a missing or closed index.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. It can also be set to `-1` to indicate that the request should
+            never timeout.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. It can
+            also be set to `-1` to indicate that the request should never timeout.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
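A minimal usage sketch for the updated add_block signature (the endpoint URL and index name are illustrative assumptions, not part of the diff):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # adjust host/auth for your cluster
    try:
        # Add a "write" block so no new documents can be indexed into the index.
        resp = await client.indices.add_block(index="my-index", block="write")
        print(resp["acknowledged"])
    finally:
        await client.close()

asyncio.run(main())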
@@ -156,7 +173,7 @@ class IndicesClient(NamespacedClient):
           The <code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its limit.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-analyze.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze>`_
 
         :param index: Index used to derive the analyzer. If specified, the `analyzer`
             or field parameter overrides this value. If no index is specified or the
@@ -215,11 +232,7 @@ class IndicesClient(NamespacedClient):
                 __body["text"] = text
             if tokenizer is not None:
                 __body["tokenizer"] = tokenizer
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -231,6 +244,51 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def cancel_migrate_reindex(
+        self,
+        *,
+        index: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Cancel a migration reindex operation.</p>
+          <p>Cancel a migration reindex attempt for a data stream or index.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex>`_
+
+        :param index: The index or data stream name
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'index'")
+        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+        __path = f'/_migration/reindex/{__path_parts["index"]}/_cancel'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.cancel_migrate_reindex",
+            path_parts=__path_parts,
+        )
+
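A hedged sketch of calling the new experimental cancel endpoint (the data stream name and endpoint URL are illustrative; experimental APIs emit a stability warning):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    try:
        # Cancel an in-flight migration reindex for a data stream or index.
        resp = await client.indices.cancel_migrate_reindex(index="my-data-stream")
        print(resp)
    finally:
        await client.close()

asyncio.run(main())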
+    @_rewrite_parameters()
     async def clear_cache(
         self,
         *,
@@ -265,7 +323,7 @@ class IndicesClient(NamespacedClient):
           To clear the cache only of specific fields, use the <code>fields</code> parameter.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clearcache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -276,7 +334,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter
             to clear the cache of specific fields only.
         :param fields: Comma-separated list of field names used to limit the `fielddata`
@@ -387,7 +445,7 @@ class IndicesClient(NamespacedClient):
           <p>Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clone-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone>`_
 
         :param index: Name of the source index to clone.
         :param target: Name of the target index to create.
@@ -491,7 +549,7 @@ class IndicesClient(NamespacedClient):
           Closing indices can be turned off with the cluster settings API by setting <code>cluster.indices.close.enable</code> to <code>false</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-close.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close>`_
 
         :param index: Comma-separated list or wildcard expression of index names used
             to limit the request.
@@ -501,7 +559,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -592,9 +650,17 @@ class IndicesClient(NamespacedClient):
           Note that changing this setting will also affect the <code>wait_for_active_shards</code> value on all subsequent write operations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-create-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create>`_
 
-        :param index: Name of the index you wish to create.
+        :param index: Name of the index you wish to create. Index names must meet the
+            following criteria: * Lowercase only * Cannot include `\\`, `/`, `*`, `?`,
+            `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to
+            7.0 could contain a colon (`:`), but that has been deprecated and will not
+            be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot
+            be `.` or `..` * Cannot be longer than 255 bytes (note that it is bytes,
+            so multi-byte characters will reach the limit faster) * Names starting with
+            `.` are deprecated, except for hidden indices and internal indices managed
+            by plugins
         :param aliases: Aliases for the index.
         :param mappings: Mapping for fields in the index. If specified, this mapping
             can include: - Field names - Field data types - Mapping parameters
@@ -665,12 +731,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create a data stream.
-          Creates a data stream.
-          You must have a matching index template with data stream enabled.</p>
+          <p>Create a data stream.</p>
+          <p>You must have a matching index template with data stream enabled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream>`_
 
         :param name: Name of the data stream, which must meet the following criteria:
             Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`,
@@ -710,6 +775,71 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters(
+        body_name="create_from",
+    )
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def create_from(
+        self,
+        *,
+        source: str,
+        dest: str,
+        create_from: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an index from a source index.</p>
+          <p>Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from>`_
+
+        :param source: The source index or data stream name
+        :param dest: The destination index or data stream name
+        :param create_from:
+        """
+        if source in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'source'")
+        if dest in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'dest'")
+        if create_from is not None and body is not None:
+            raise ValueError("Cannot set both 'create_from' and 'body'")
+        __path_parts: t.Dict[str, str] = {
+            "source": _quote(source),
+            "dest": _quote(dest),
+        }
+        __path = f'/_create_from/{__path_parts["source"]}/{__path_parts["dest"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __body = create_from if create_from is not None else body
+        if not __body:
+            __body = None
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.create_from",
+            path_parts=__path_parts,
+        )
+
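The new create_from helper can be exercised roughly as follows; a minimal sketch assuming a locally reachable cluster, with illustrative index names:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    try:
        # Copy mappings and settings from "logs-old" into a new "logs-new" index.
        # The client accepts an empty body; override fields could instead be
        # supplied via the `create_from=` mapping (see the linked API docs).
        resp = await client.indices.create_from(source="logs-old", dest="logs-new")
        print(resp)
    finally:
        await client.close()

asyncio.run(main())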
     @_rewrite_parameters()
     async def data_streams_stats(
         self,
@@ -731,11 +861,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data stream stats.
-          Retrieves statistics for one or more data streams.</p>
+          <p>Get data stream stats.</p>
+          <p>Get statistics for one or more data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1>`_
 
         :param name: Comma-separated list of data streams used to limit the request.
             Wildcard expressions (`*`) are supported. To target all data streams in a
@@ -804,7 +934,7 @@ class IndicesClient(NamespacedClient):
           You can then use the delete index API to delete the previous write index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete>`_
 
         :param index: Comma-separated list of indices to delete. You cannot specify index
             aliases. By default, this parameter does not support wildcards (`*`) or `_all`.
@@ -816,7 +946,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -878,7 +1008,7 @@ class IndicesClient(NamespacedClient):
           Removes a data stream or index from an alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias>`_
 
         :param index: Comma-separated list of data streams or indices used to limit the
             request. Supports wildcards (`*`).
@@ -946,7 +1076,7 @@ class IndicesClient(NamespacedClient):
           Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-delete-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle>`_
 
         :param name: A comma-separated list of data streams of which the data stream
             lifecycle will be deleted; use `*` to get all data streams
@@ -1010,7 +1140,7 @@ class IndicesClient(NamespacedClient):
           Deletes one or more data streams and their backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream>`_
 
         :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions
             are supported.
@@ -1048,6 +1178,71 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    async def delete_data_stream_options(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Delete data stream options.
+          Removes the data stream options from a data stream.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream-options>`_
+
+        :param name: A comma-separated list of data streams of which the data stream
+            options will be deleted; use `*` to get all data streams
+        :param expand_wildcards: Whether wildcard expressions should get expanded to
+            open or closed indices (default: open)
+        :param master_timeout: Specify timeout for connection to master
+        :param timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_options'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "DELETE",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.delete_data_stream_options",
+            path_parts=__path_parts,
+        )
+
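A short sketch of the new delete_data_stream_options call (data stream name and timeouts are illustrative):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    try:
        # Remove the options configuration from a single data stream.
        resp = await client.indices.delete_data_stream_options(
            name="my-data-stream", master_timeout="30s", timeout="30s"
        )
        print(resp)
    finally:
        await client.close()

asyncio.run(main())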
+    @_rewrite_parameters()
     async def delete_index_template(
         self,
         *,
@@ -1068,7 +1263,7 @@ class IndicesClient(NamespacedClient):
           existing templates.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (*) expressions are supported.
@@ -1120,10 +1315,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a legacy index template.</p>
+          <p>Delete a legacy index template.
+          IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template>`_
 
         :param name: The name of the legacy index template to delete. Wildcard (`*`)
             expressions are supported.
@@ -1193,9 +1389,10 @@ class IndicesClient(NamespacedClient):
           <p>NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index <code>store_size</code> value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
           Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.
           The stored size of the <code>_id</code> field is likely underestimated while the <code>_source</code> field is overestimated.</p>
+          <p>For usage examples, see the external documentation or refer to <a href="https://www.elastic.co/docs/reference/elasticsearch/rest-apis/index-disk-usage">Analyze the index disk usage</a>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-disk-usage.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. It’s recommended to execute this API with a single
@@ -1278,7 +1475,7 @@ class IndicesClient(NamespacedClient):
           The source index must be read only (<code>index.blocks.write: true</code>).</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-downsample-data-stream.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample>`_
 
         :param index: Name of the time series index to downsample.
         :param target_index: Name of the index to create.
@@ -1350,7 +1547,7 @@ class IndicesClient(NamespacedClient):
           Check if one or more indices, index aliases, or data streams exist.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases. Supports
             wildcards (`*`).
@@ -1360,7 +1557,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param flat_settings: If `true`, returns settings in flat format.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
@@ -1422,17 +1619,17 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> HeadApiResponse:
         """
         .. raw:: html
 
-          <p>Check aliases.
-          Checks if one or more data stream or index aliases exist.</p>
+          <p>Check aliases.</p>
+          <p>Check if one or more data stream or index aliases exist.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias>`_
 
         :param name: Comma-separated list of aliases to check. Supports wildcards (`*`).
         :param index: Comma-separated list of data streams or indices used to limit the
@@ -1444,11 +1641,12 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, requests that include a missing data stream
             or index in the target indices or data streams return an error.
-        :param local: If `true`, the request retrieves information from the local node
-            only.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -1474,8 +1672,8 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
-        if local is not None:
-            __query["local"] = local
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -1495,21 +1693,27 @@ class IndicesClient(NamespacedClient):
         name: str,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        flat_settings: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
+        local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> HeadApiResponse:
         """
         .. raw:: html
 
-          <p>Check index templates.
-          Check whether index templates exist.</p>
+          <p>Check index templates.</p>
+          <p>Check whether index templates exist.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/index-templates.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (*) expressions are supported.
+        :param flat_settings: If `true`, returns settings in flat format.
+        :param local: If `true`, the request retrieves information from the local node
+            only. Defaults to `false`, which means information is retrieved from the master
+            node.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
@@ -1523,8 +1727,12 @@ class IndicesClient(NamespacedClient):
             __query["error_trace"] = error_trace
         if filter_path is not None:
             __query["filter_path"] = filter_path
+        if flat_settings is not None:
+            __query["flat_settings"] = flat_settings
         if human is not None:
             __query["human"] = human
+        if local is not None:
+            __query["local"] = local
         if master_timeout is not None:
             __query["master_timeout"] = master_timeout
         if pretty is not None:
@@ -1561,7 +1769,7 @@ class IndicesClient(NamespacedClient):
           <p>IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template>`_
 
         :param name: A comma-separated list of index template names used to limit the
             request. Wildcard (`*`) expressions are supported.
@@ -1619,7 +1827,7 @@ class IndicesClient(NamespacedClient):
           Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-explain-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle>`_
 
         :param index: The name of the index to explain
         :param include_defaults: indicates if the API should return the default values
@@ -1673,12 +1881,7 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        wait_for_active_shards: t.Optional[
-            t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
-        ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -1691,7 +1894,7 @@ class IndicesClient(NamespacedClient):
           A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats>`_
 
         :param index: Comma-separated list or wildcard expression of index names used
             to limit the request.
@@ -1708,14 +1911,6 @@ class IndicesClient(NamespacedClient):
             in the statistics.
         :param ignore_unavailable: If `true`, missing or closed indices are not included
             in the response.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
-        :param wait_for_active_shards: The number of shard copies that must be active
-            before proceeding with the operation. Set to all or any positive integer
-            up to the total number of shards in the index (`number_of_replicas+1`).
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -1736,14 +1931,8 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
-        if wait_for_active_shards is not None:
-            __query["wait_for_active_shards"] = wait_for_active_shards
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -1790,7 +1979,7 @@ class IndicesClient(NamespacedClient):
           If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-flush.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases to flush.
             Supports wildcards (`*`). To flush all data streams and indices, omit this
@@ -1801,7 +1990,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param force: If `true`, the request forces a flush even if there are no changes
             to commit to the index.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
@@ -1915,7 +2104,7 @@ class IndicesClient(NamespacedClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge>`_
 
         :param index: A comma-separated list of index names; use `_all` or empty string
             to perform the operation on all indices
@@ -2013,7 +2202,7 @@ class IndicesClient(NamespacedClient):
           stream’s backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get>`_
 
         :param index: Comma-separated list of data streams, indices, and index aliases
             used to limit the request. Wildcard expressions (*) are supported.
@@ -2096,7 +2285,7 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2106,7 +2295,7 @@ class IndicesClient(NamespacedClient):
           Retrieves information for one or more data stream or index aliases.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-alias.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias>`_
 
         :param index: Comma-separated list of data streams or indices used to limit the
             request. Supports wildcards (`*`). To target all data streams and indices,
@@ -2119,11 +2308,12 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
-        :param local: If `true`, the request retrieves information from the local node
-            only.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH:
@@ -2151,8 +2341,8 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
-        if local is not None:
-            __query["local"] = local
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -2188,18 +2378,17 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data stream lifecycles.
-          Retrieves the data stream lifecycle configuration of one or more data streams.</p>
+          <p>Get data stream lifecycles.</p>
+          <p>Get the data stream lifecycle configuration of one or more data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-get-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle>`_
 
         :param name: Comma-separated list of data streams to limit the request. Supports
             wildcards (`*`). To target all data streams, omit this parameter or use `*`
             or `_all`.
         :param expand_wildcards: Type of data stream that wildcard patterns can match.
-            Supports comma-separated values, such as `open,hidden`. Valid values are:
-            `all`, `open`, `closed`, `hidden`, `none`.
+            Supports comma-separated values, such as `open,hidden`.
         :param include_defaults: If `true`, return all default settings in the response.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -2250,7 +2439,7 @@ class IndicesClient(NamespacedClient):
           Get statistics about the data streams that are managed by a data stream lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-get-lifecycle-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_lifecycle/stats"
@@ -2297,11 +2486,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data streams.
-          Retrieves information about one or more data streams.</p>
+          <p>Get data streams.</p>
+          <p>Get information about one or more data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream>`_
 
         :param name: Comma-separated list of data stream names used to limit the request.
             Wildcard (`*`) expressions are supported. If omitted, all data streams are
@@ -2351,6 +2540,172 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    async def get_data_stream_mappings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get data stream mappings.</p>
+          <p>Get mapping information for one or more data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-mappings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+            Supports wildcards (`*`).
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_mappings'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_data_stream_mappings",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    async def get_data_stream_options(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get data stream options.</p>
+          <p>Get the data stream options configuration of one or more data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-options>`_
+
+        :param name: Comma-separated list of data streams to limit the request. Supports
+            wildcards (`*`). To target all data streams, omit this parameter or use `*`
+            or `_all`.
+        :param expand_wildcards: Type of data stream that wildcard patterns can match.
+            Supports comma-separated values, such as `open,hidden`.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_options'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_data_stream_options",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    async def get_data_stream_settings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get data stream settings.</p>
+          <p>Get setting information for one or more data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+            Supports wildcards (`*`).
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_settings'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_data_stream_settings",
+            path_parts=__path_parts,
+        )
+
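The three new data stream getters share the same calling convention; a hedged sketch with an illustrative data stream name and endpoint:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    try:
        # Each endpoint takes a data stream name or wildcard pattern via `name`.
        mappings = await client.indices.get_data_stream_mappings(name="my-data-stream")
        options = await client.indices.get_data_stream_options(name="my-data-stream")
        settings = await client.indices.get_data_stream_settings(name="my-data-stream")
        print(mappings, options, settings)
    finally:
        await client.close()

asyncio.run(main())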
+    @_rewrite_parameters()
     async def get_field_mapping(
         self,
         *,
@@ -2370,7 +2725,6 @@ class IndicesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         include_defaults: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2382,7 +2736,7 @@ class IndicesClient(NamespacedClient):
           <p>This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping>`_
 
         :param fields: Comma-separated list or wildcard expression of fields used to
             limit returned information. Supports wildcards (`*`).
@@ -2395,12 +2749,10 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param include_defaults: If `true`, return all default settings in the response.
-        :param local: If `true`, the request retrieves information from the local node
-            only.
         """
         if fields in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'fields'")
@@ -2428,8 +2780,6 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if include_defaults is not None:
             __query["include_defaults"] = include_defaults
-        if local is not None:
-            __query["local"] = local
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -2463,7 +2813,7 @@ class IndicesClient(NamespacedClient):
           Get information about one or more index templates.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (*) expressions are supported.
@@ -2540,7 +2890,7 @@ class IndicesClient(NamespacedClient):
           For data streams, the API retrieves mappings for the stream’s backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -2551,7 +2901,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param local: If `true`, the request retrieves information from the local node
@@ -2597,6 +2947,51 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def get_migrate_reindex_status(
+        self,
+        *,
+        index: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get the migration reindexing status.</p>
+          <p>Get the status of a migration reindex attempt for a data stream or index.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration>`_
+
+        :param index: The index or data stream name.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'index'")
+        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+        __path = f'/_migration/reindex/{__path_parts["index"]}/_status'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_migrate_reindex_status",
+            path_parts=__path_parts,
+        )
+
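A brief sketch of polling the experimental status endpoint for an earlier migration reindex request (names are illustrative):

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    try:
        # Check how far the persistent reindex task has progressed.
        status = await client.indices.get_migrate_reindex_status(index="my-data-stream")
        print(status)
    finally:
        await client.close()

asyncio.run(main())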
+    @_rewrite_parameters()
     async def get_settings(
         self,
         *,
@@ -2629,7 +3024,7 @@ class IndicesClient(NamespacedClient):
           For data streams, it returns setting information for the stream's backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -2716,12 +3111,12 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get index templates.
+          <p>Get legacy index templates.
           Get information about one or more index templates.</p>
           <p>IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (`*`) expressions are supported. To return all index templates,
@@ -2765,6 +3160,62 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters(
+        body_name="reindex",
+    )
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def migrate_reindex(
+        self,
+        *,
+        reindex: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Reindex legacy backing indices.</p>
+          <p>Reindex all legacy backing indices for a data stream.
+          This operation occurs in a persistent task.
+          The persistent task ID is returned immediately and the reindexing work is completed in that task.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex>`_
+
+        :param reindex:
+        """
+        if reindex is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'reindex' and 'body', one of them should be set."
+            )
+        elif reindex is not None and body is not None:
+            raise ValueError("Cannot set both 'reindex' and 'body'")
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_migration/reindex"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __body = reindex if reindex is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.migrate_reindex",
+            path_parts=__path_parts,
+        )
+
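A hedged sketch of starting a migration reindex; the body fields ("mode", "source.index") follow the migrate reindex API documentation and should be treated as assumptions to verify against your server version:

import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # illustrative endpoint
    try:
        # Kick off reindexing of all legacy backing indices of a data stream.
        # The call returns immediately; the work continues in a persistent task.
        resp = await client.indices.migrate_reindex(
            reindex={"mode": "upgrade", "source": {"index": "my-data-stream"}}
        )
        print(resp)
    finally:
        await client.close()

asyncio.run(main())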
     @_rewrite_parameters()
     async def migrate_to_data_stream(
         self,
@@ -2793,7 +3244,7 @@ class IndicesClient(NamespacedClient):
           The write index for the alias becomes the write index for the stream.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream>`_
 
         :param name: Name of the index alias to convert to a data stream.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -2849,7 +3300,7 @@ class IndicesClient(NamespacedClient):
           Performs one or more data stream modification actions in a single atomic operation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream>`_
 
         :param actions: Actions to perform.
         """
@@ -2928,7 +3379,7 @@ class IndicesClient(NamespacedClient):
           <p>Because opening or closing an index allocates its shards, the <code>wait_for_active_shards</code> setting on index creation applies to the <code>_open</code> and <code>_close</code> index actions as well.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). By default, you must explicitly
@@ -2942,7 +3393,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -3014,7 +3465,7 @@ class IndicesClient(NamespacedClient):
           This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream>`_
 
         :param name: The name of the data stream
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -3080,7 +3531,7 @@ class IndicesClient(NamespacedClient):
           Adds a data stream or index to an alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias>`_
 
         :param index: Comma-separated list of data streams or indices to add. Supports
             wildcards (`*`). Wildcard patterns that match both data streams and indices
@@ -3155,7 +3606,7 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("data_retention", "downsampling"),
+        body_fields=("data_retention", "downsampling", "enabled"),
     )
     async def put_data_lifecycle(
         self,
@@ -3163,6 +3614,7 @@ class IndicesClient(NamespacedClient):
         name: t.Union[str, t.Sequence[str]],
         data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         downsampling: t.Optional[t.Mapping[str, t.Any]] = None,
+        enabled: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         expand_wildcards: t.Optional[
             t.Union[
@@ -3186,7 +3638,7 @@ class IndicesClient(NamespacedClient):
           Update the data stream lifecycle of the specified data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-put-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle>`_
 
         :param name: Comma-separated list of data streams used to limit the request.
             Supports wildcards (`*`). To target all data streams use `*` or `_all`.
@@ -3194,12 +3646,13 @@ class IndicesClient(NamespacedClient):
             be stored at least for this time frame. Any time after this duration the
             document could be deleted. When empty, every document in this data stream
             will be stored indefinitely.
-        :param downsampling: If defined, every backing index will execute the configured
-            downsampling configuration after the backing index is not the data stream
-            write index anymore.
+        :param downsampling: The downsampling configuration to execute for the managed
+            backing index after rollover.
+        :param enabled: If defined, it turns data stream lifecycle on/off (`true`/`false`)
+            for this data stream. A data stream lifecycle that's disabled (enabled: `false`)
+            will have no effect on the data stream.
         :param expand_wildcards: Type of data stream that wildcard patterns can match.
-            Supports comma-separated values, such as `open,hidden`. Valid values are:
-            `all`, `hidden`, `open`, `closed`, `none`.
+            Supports comma-separated values, such as `open,hidden`.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
@@ -3231,11 +3684,9 @@ class IndicesClient(NamespacedClient):
                 __body["data_retention"] = data_retention
             if downsampling is not None:
                 __body["downsampling"] = downsampling
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+            if enabled is not None:
+                __body["enabled"] = enabled
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
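A minimal sketch of the extended put_data_lifecycle call using the new `enabled` body field alongside `data_retention`; the data stream name and retention period are hypothetical, and `es` is assumed to be an already-configured AsyncElasticsearch client.

from elasticsearch import AsyncElasticsearch


async def pause_lifecycle(es: AsyncElasticsearch) -> None:
    # Keep the configured retention but switch the lifecycle off for now.
    resp = await es.indices.put_data_lifecycle(
        name="my-data-stream",  # hypothetical data stream
        data_retention="7d",
        enabled=False,
    )
    print(resp)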
@@ -3247,6 +3698,240 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
+        body_name="mappings",
+    )
+    async def put_data_stream_mappings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        mappings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        dry_run: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Update data stream mappings.</p>
+          <p>This API can be used to override mappings on specific data streams. These overrides will take precedence over what
+          is specified in the template that the data stream matches. The mapping change is only applied to new write indices
+          that are created during rollover after this API is called. No indices are changed by this API.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-mappings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+        :param mappings:
+        :param dry_run: If `true`, the request does not actually change the mappings
+            on any data streams. Instead, it simulates changing the mappings and reports
+            back to the user what would have happened had these mappings actually been
+            applied.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        if mappings is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'mappings' and 'body', one of them should be set."
+            )
+        elif mappings is not None and body is not None:
+            raise ValueError("Cannot set both 'mappings' and 'body'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_mappings'
+        __query: t.Dict[str, t.Any] = {}
+        if dry_run is not None:
+            __query["dry_run"] = dry_run
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __body = mappings if mappings is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.put_data_stream_mappings",
+            path_parts=__path_parts,
+        )
+
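A short sketch of the new put_data_stream_mappings helper, assuming an already-configured AsyncElasticsearch client named `es`; the data stream and field names are hypothetical.

from elasticsearch import AsyncElasticsearch


async def override_stream_mappings(es: AsyncElasticsearch) -> None:
    new_mappings = {"properties": {"session_id": {"type": "keyword"}}}
    # Dry-run first: reports what would change without touching the data stream.
    preview = await es.indices.put_data_stream_mappings(
        name="my-data-stream",  # hypothetical data stream
        mappings=new_mappings,
        dry_run=True,
    )
    print(preview)
    # Apply for real; the override only affects write indices created on rollover.
    await es.indices.put_data_stream_mappings(
        name="my-data-stream",
        mappings=new_mappings,
    )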
+    @_rewrite_parameters(
+        body_fields=("failure_store",),
+    )
+    async def put_data_stream_options(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        failure_store: t.Optional[t.Mapping[str, t.Any]] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Update data stream options.
+          Update the data stream options of the specified data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-options>`_
+
+        :param name: Comma-separated list of data streams used to limit the request.
+            Supports wildcards (`*`). To target all data streams use `*` or `_all`.
+        :param expand_wildcards: Type of data stream that wildcard patterns can match.
+            Supports comma-separated values, such as `open,hidden`.
+        :param failure_store: If defined, it will update the failure store configuration
+            of every data stream resolved by the name expression.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_options'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if failure_store is not None:
+                __body["failure_store"] = failure_store
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.put_data_stream_options",
+            path_parts=__path_parts,
+        )
+
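A hedged sketch of put_data_stream_options: the `failure_store` keyword comes from this method's signature, but the `{"enabled": True}` body shape is an assumption based on the failure store documentation. `es` is assumed to be a configured AsyncElasticsearch client.

from elasticsearch import AsyncElasticsearch


async def enable_failure_store(es: AsyncElasticsearch) -> None:
    resp = await es.indices.put_data_stream_options(
        name="my-data-stream",            # hypothetical data stream
        failure_store={"enabled": True},  # assumed failure store body shape
    )
    print(resp)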
+    @_rewrite_parameters(
+        body_name="settings",
+    )
+    async def put_data_stream_settings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        dry_run: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Update data stream settings.</p>
+          <p>This API can be used to override settings on specific data streams. These overrides will take precedence over what
+          is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,
+          only certain settings are allowed. If possible, the setting change is applied to all
+          backing indices. Otherwise, it will be applied when the data stream is next rolled over.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+        :param settings:
+        :param dry_run: If `true`, the request does not actually change the settings
+            on any data streams or indices. Instead, it simulates changing the settings
+            and reports back to the user what would have happened had these settings
+            actually been applied.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        if settings is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'settings' and 'body', one of them should be set."
+            )
+        elif settings is not None and body is not None:
+            raise ValueError("Cannot set both 'settings' and 'body'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_settings'
+        __query: t.Dict[str, t.Any] = {}
+        if dry_run is not None:
+            __query["dry_run"] = dry_run
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __body = settings if settings is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.put_data_stream_settings",
+            path_parts=__path_parts,
+        )
+
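A sketch of put_data_stream_settings with a dry run; whether `index.lifecycle.name` is among the allowed settings is an assumption, the policy name is hypothetical, and `es` is assumed to be a configured AsyncElasticsearch client.

from elasticsearch import AsyncElasticsearch


async def override_stream_settings(es: AsyncElasticsearch) -> None:
    # Preview what would change across the data stream before applying it.
    resp = await es.indices.put_data_stream_settings(
        name="my-data-stream",                           # hypothetical data stream
        settings={"index.lifecycle.name": "my-policy"},  # assumed allowed setting
        dry_run=True,
    )
    print(resp)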
+    @_rewrite_parameters(
         body_fields=(
             "allow_auto_create",
             "composed_of",
@@ -3310,7 +3995,7 @@ class IndicesClient(NamespacedClient):
           If an entry already exists with the same key, then it is overwritten by the new definition.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template>`_
 
         :param name: Index or template name
         :param allow_auto_create: This setting overrides the value of the `action.auto_create_index`
@@ -3473,27 +4158,20 @@ class IndicesClient(NamespacedClient):
 
           <p>Update field mappings.
           Add new fields to an existing data stream or index.
-          You can also use this API to change the search settings of existing fields and add new properties to existing object fields.
-          For data streams, these changes are applied to all backing indices by default.</p>
-          <p><strong>Add multi-fields to an existing field</strong></p>
-          <p>Multi-fields let you index the same field in different ways.
-          You can use this API to update the fields mapping parameter and enable multi-fields for an existing field.
-          WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.
-          You can populate the new multi-field with the update by query API.</p>
-          <p><strong>Change supported mapping parameters for an existing field</strong></p>
-          <p>The documentation for each mapping parameter indicates whether you can update it for an existing field using this API.
-          For example, you can use the update mapping API to update the <code>ignore_above</code> parameter.</p>
-          <p><strong>Change the mapping of an existing field</strong></p>
-          <p>Except for supported mapping parameters, you can't change the mapping or field type of an existing field.
-          Changing an existing field could invalidate data that's already indexed.</p>
-          <p>If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.
-          If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.</p>
-          <p><strong>Rename a field</strong></p>
-          <p>Renaming a field would invalidate data already indexed under the old field name.
-          Instead, add an alias field to create an alternate field name.</p>
+          You can use the update mapping API to:</p>
+          <ul>
+          <li>Add a new field to an existing index</li>
+          <li>Update mappings for multiple indices in a single request</li>
+          <li>Add new properties to an object field</li>
+          <li>Enable multi-fields for an existing field</li>
+          <li>Update supported mapping parameters</li>
+          <li>Change a field's mapping using reindexing</li>
+          <li>Rename a field using a field alias</li>
+          </ul>
+          <p>Learn how to use the update mapping API with practical examples in the <a href="https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples">Update mapping API examples</a> guide.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping>`_
 
         :param index: A comma-separated list of index names the mapping should be added
             to (supports wildcards); use `_all` or omit to add the mapping on all indices.
@@ -3509,7 +4187,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param field_names: Control whether field names are enabled for the index.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
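As one sketch of the uses listed in the docstring, adding a new keyword field with the update mapping API; the index and field names are hypothetical, `es` is an assumed AsyncElasticsearch client, and the `properties` keyword belongs to put_mapping's signature, which is not shown in this hunk.

from elasticsearch import AsyncElasticsearch


async def add_keyword_field(es: AsyncElasticsearch) -> None:
    resp = await es.indices.put_mapping(
        index="my-index",                             # hypothetical index
        properties={"user_id": {"type": "keyword"}},  # new field to add
    )
    print(resp)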
@@ -3617,6 +4295,7 @@ class IndicesClient(NamespacedClient):
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         preserve_existing: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        reopen: t.Optional[bool] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -3626,8 +4305,36 @@ class IndicesClient(NamespacedClient):
           Changes dynamic index settings in real time.
           For data streams, index setting changes are applied to all backing indices by default.</p>
           <p>To revert a setting to the default value, use a null value.
-          The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
+          The list of per-index settings that can be updated dynamically on live indices can be found in the index settings documentation.
           To preserve existing settings from being updated, set the <code>preserve_existing</code> parameter to <code>true</code>.</p>
+          <p>For performance optimization during bulk indexing, you can disable the refresh interval.
+          Refer to <a href="https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval">disable refresh interval</a> for an example.
+          There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:</p>
+          <pre><code>{
+            &quot;number_of_replicas&quot;: 1
+          }
+          </code></pre>
+          <p>Or you can use an <code>index</code> setting object:</p>
+          <pre><code>{
+            &quot;index&quot;: {
+              &quot;number_of_replicas&quot;: 1
+            }
+          }
+          </code></pre>
+          <p>Or you can use dot notation:</p>
+          <pre><code>{
+            &quot;index.number_of_replicas&quot;: 1
+          }
+          </code></pre>
+          <p>Or you can embed any of the aforementioned options in a <code>settings</code> object. For example:</p>
+          <pre><code>{
+            &quot;settings&quot;: {
+              &quot;index&quot;: {
+                &quot;number_of_replicas&quot;: 1
+              }
+            }
+          }
+          </code></pre>
           <p>NOTE: You can only define new analyzers on closed indices.
           To add an analyzer, you must close the index, define the analyzer, and reopen the index.
           You cannot close the write index of a data stream.
@@ -3635,10 +4342,11 @@ class IndicesClient(NamespacedClient):
           Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
           This affects searches and any new data added to the stream after the rollover.
           However, it does not affect the data stream's backing indices or their existing data.
-          To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.</p>
+          To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
+          Refer to <a href="https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices">updating analyzers on existing indices</a> for step-by-step examples.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings>`_
 
         :param settings:
         :param index: Comma-separated list of data streams, indices, and aliases used
@@ -3659,6 +4367,9 @@ class IndicesClient(NamespacedClient):
             no response is received before the timeout expires, the request fails and
             returns an error.
         :param preserve_existing: If `true`, existing index settings remain unchanged.
+        :param reopen: Whether to close and reopen the index to apply non-dynamic settings.
+            If set to `true`, the indices to which the settings are being applied will
+            be closed temporarily and then reopened in order to apply the changes.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         """
@@ -3696,6 +4407,8 @@ class IndicesClient(NamespacedClient):
             __query["preserve_existing"] = preserve_existing
         if pretty is not None:
             __query["pretty"] = pretty
+        if reopen is not None:
+            __query["reopen"] = reopen
         if timeout is not None:
             __query["timeout"] = timeout
         __body = settings if settings is not None else body
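A sketch combining one of the request-body shapes from the docstring with the new `reopen` flag; the index name is hypothetical, `index.codec` is used only as an example of a non-dynamic setting, and `es` is an assumed AsyncElasticsearch client.

from elasticsearch import AsyncElasticsearch


async def update_index_settings(es: AsyncElasticsearch) -> None:
    # Dynamic setting, using the nested "index" object form from the docstring.
    await es.indices.put_settings(
        index="my-index",  # hypothetical index
        settings={"index": {"number_of_replicas": 1}},
    )
    # Non-dynamic setting: let the API close and reopen the index to apply it.
    await es.indices.put_settings(
        index="my-index",
        settings={"index.codec": "best_compression"},
        reopen=True,
    )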
@@ -3742,7 +4455,7 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create or update an index template.
+          <p>Create or update a legacy index template.
           Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
           Elasticsearch applies templates to new indices based on an index pattern that matches the index name.</p>
           <p>IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
@@ -3759,11 +4472,11 @@ class IndicesClient(NamespacedClient):
           NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template>`_
 
         :param name: The name of the template
         :param aliases: Aliases for the index.
-        :param cause:
+        :param cause: User-defined reason for creating or updating the index template.
         :param create: If true, this request cannot replace or update existing index
             templates.
         :param index_patterns: Array of wildcard expressions used to match the names
@@ -3831,10 +4544,20 @@ class IndicesClient(NamespacedClient):
         *,
         index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         active_only: t.Optional[bool] = None,
+        allow_no_indices: t.Optional[bool] = None,
         detailed: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -3861,14 +4584,23 @@ class IndicesClient(NamespacedClient):
           This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
         :param active_only: If `true`, the response only includes ongoing shard recoveries.
+        :param allow_no_indices: If `false`, the request returns an error if any wildcard
+            expression, index alias, or `_all` value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices.
         :param detailed: If `true`, the response includes detailed information about
             shard recoveries.
+        :param expand_wildcards: Type of index that wildcard patterns can match. If the
+            request can target data streams, this argument determines whether wildcard
+            expressions match hidden data streams. Supports comma-separated values, such
+            as `open,hidden`.
+        :param ignore_unavailable: If `false`, the request returns an error if it targets
+            a missing or closed index.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH:
@@ -3880,14 +4612,20 @@ class IndicesClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if active_only is not None:
             __query["active_only"] = active_only
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
         if detailed is not None:
             __query["detailed"] = detailed
         if error_trace is not None:
             __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
         if filter_path is not None:
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -3928,6 +4666,7 @@ class IndicesClient(NamespacedClient):
           For data streams, the API runs the refresh operation on the stream’s backing indices.</p>
           <p>By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.
           You can change this default interval with the <code>index.refresh_interval</code> setting.</p>
+          <p>In Elastic Cloud Serverless, the default refresh interval is 5 seconds across all indices.</p>
           <p>Refresh requests are synchronous and do not return a response until the refresh operation completes.</p>
           <p>Refreshes are resource-intensive.
           To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.</p>
@@ -3935,7 +4674,7 @@ class IndicesClient(NamespacedClient):
           This option ensures the indexing operation waits for a periodic refresh before running the search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -3946,7 +4685,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         """
@@ -4001,6 +4740,7 @@ class IndicesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        resource: t.Optional[str] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4018,7 +4758,7 @@ class IndicesClient(NamespacedClient):
           This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers>`_
 
         :param index: A comma-separated list of index names to reload analyzers for
         :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
@@ -4028,6 +4768,7 @@ class IndicesClient(NamespacedClient):
             that are open, closed or both.
         :param ignore_unavailable: Whether specified concrete indices should be ignored
             when unavailable (missing or closed)
+        :param resource: The changed resource to reload analyzers from, if applicable.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -4048,6 +4789,8 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
+        if resource is not None:
+            __query["resource"] = resource
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -4059,10 +4802,109 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    async def remove_block(
+        self,
+        *,
+        index: str,
+        block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]],
+        allow_no_indices: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Remove an index block.</p>
+          <p>Remove an index block from an index.
+          Index blocks limit the operations allowed on an index by blocking specific operation types.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block>`_
+
+        :param index: A comma-separated list or wildcard expression of index names used
+            to limit the request. By default, you must explicitly name the indices you
+            are removing blocks from. To allow the removal of blocks from indices with
+            `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name`
+            setting to `false`. You can update this setting in the `elasticsearch.yml`
+            file or by using the cluster update settings API.
+        :param block: The block type to remove from the index.
+        :param allow_no_indices: If `false`, the request returns an error if any wildcard
+            expression, index alias, or `_all` value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices. For
+            example, a request targeting `foo*,bar*` returns an error if an index starts
+            with `foo` but no index starts with `bar`.
+        :param expand_wildcards: The type of index that wildcard patterns can match.
+            If the request can target data streams, this argument determines whether
+            wildcard expressions match hidden data streams. It supports comma-separated
+            values, such as `open,hidden`.
+        :param ignore_unavailable: If `false`, the request returns an error if it targets
+            a missing or closed index.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. It can also be set to `-1` to indicate that the request should
+            never time out.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. It can
+            also be set to `-1` to indicate that the request should never time out.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'index'")
+        if block in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'block'")
+        __path_parts: t.Dict[str, str] = {
+            "index": _quote(index),
+            "block": _quote(block),
+        }
+        __path = f'/{__path_parts["index"]}/_block/{__path_parts["block"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "DELETE",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.remove_block",
+            path_parts=__path_parts,
+        )
+
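A minimal sketch of the new remove_block helper, lifting a write block; the index name and timeout are hypothetical and `es` is an assumed AsyncElasticsearch client.

from elasticsearch import AsyncElasticsearch


async def remove_write_block(es: AsyncElasticsearch) -> None:
    resp = await es.indices.remove_block(
        index="my-index",  # hypothetical index
        block="write",
        timeout="30s",
    )
    print(resp)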
+    @_rewrite_parameters()
     async def resolve_cluster(
         self,
         *,
-        name: t.Union[str, t.Sequence[str]],
+        name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         allow_no_indices: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         expand_wildcards: t.Optional[
@@ -4078,19 +4920,20 @@ class IndicesClient(NamespacedClient):
         ignore_throttled: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Resolve the cluster.
-          Resolve the specified index expressions to return information about each cluster, including the local cluster, if included.
-          Multiple patterns and remote clusters are supported.</p>
+          <p>Resolve the cluster.</p>
+          <p>Resolve the specified index expressions to return information about each cluster, including the local &quot;querying&quot; cluster, if included.
+          If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.</p>
           <p>This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.</p>
           <p>You use the same index expression with this endpoint as you would for cross-cluster search.
           Index and cluster exclusions are also supported with this endpoint.</p>
           <p>For each cluster in the index expression, information is returned about:</p>
           <ul>
-          <li>Whether the querying (&quot;local&quot;) cluster is currently connected to each remote cluster in the index expression scope.</li>
+          <li>Whether the querying (&quot;local&quot;) cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the <code>remote/info</code> endpoint.</li>
           <li>Whether each remote cluster is configured with <code>skip_unavailable</code> as <code>true</code> or <code>false</code>.</li>
           <li>Whether there are any indices, aliases, or data streams on that cluster that match the index expression.</li>
           <li>Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).</li>
@@ -4098,7 +4941,13 @@ class IndicesClient(NamespacedClient):
           </ul>
           <p>For example, <code>GET /_resolve/cluster/my-index-*,cluster*:my-index-*</code> returns information about the local cluster and all remotely configured clusters that start with the alias <code>cluster*</code>.
           Each cluster returns information about whether it has any indices, aliases or data streams that match <code>my-index-*</code>.</p>
-          <p><strong>Advantages of using this endpoint before a cross-cluster search</strong></p>
+          <h2>Note on backwards compatibility</h2>
+          <p>The ability to query without an index expression was added in version 8.18, so when
+          querying remote clusters older than that, the local cluster will send the index
+          expression <code>dummy*</code> to those remote clusters. Thus, if an error occurs, you may see a reference
+          to that index expression even though you didn't request it. If it causes a problem, you can
+          instead include an index expression like <code>*:*</code> to bypass the issue.</p>
+          <h2>Advantages of using this endpoint before a cross-cluster search</h2>
           <p>You may want to exclude a cluster or index from a search when:</p>
           <ul>
           <li>A remote cluster is not currently connected and is configured with <code>skip_unavailable=false</code>. Running a cross-cluster search under those conditions will cause the entire search to fail.</li>
@@ -4106,31 +4955,59 @@ class IndicesClient(NamespacedClient):
           <li>The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the &quot;error&quot; field in the <code>_resolve/cluster</code> response will be present. (This is also where security/permission errors will be shown.)</li>
           <li>A remote cluster is an older version that does not support the feature you want to use in your search.</li>
           </ul>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html>`_
-
-        :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases,
-            and data streams to resolve. Resources on remote clusters can be specified
-            using the `<cluster>`:`<name>` syntax.
+          <h2>Test availability of remote clusters</h2>
+          <p>The <code>remote/info</code> endpoint is commonly used to test whether the &quot;local&quot; cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+          The remote cluster may be available, while the local cluster is not currently connected to it.</p>
+          <p>You can use the <code>_resolve/cluster</code> API to attempt to reconnect to remote clusters.
+          For example with <code>GET _resolve/cluster</code> or <code>GET _resolve/cluster/*:*</code>.
+          The <code>connected</code> field in the response will indicate whether it was successful.
+          If a connection was (re-)established, this will also cause the <code>remote/info</code> endpoint to now indicate a connected status.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster>`_
+
+        :param name: A comma-separated list of names or index patterns for the indices,
+            aliases, and data streams to resolve. Resources on remote clusters can be
+            specified using the `<cluster>`:`<name>` syntax. Index and cluster exclusions
+            (e.g., `-cluster1:*`) are also supported. If no index expression is specified,
+            information about all remote clusters configured on the local cluster is
+            returned without doing any index matching
         :param allow_no_indices: If false, the request returns an error if any wildcard
-            expression, index alias, or _all value targets only missing or closed indices.
+            expression, index alias, or `_all` value targets only missing or closed indices.
             This behavior applies even if the request targets other open indices. For
-            example, a request targeting foo*,bar* returns an error if an index starts
-            with foo but no index starts with bar.
+            example, a request targeting `foo*,bar*` returns an error if an index starts
+            with `foo` but no index starts with `bar`. NOTE: This option is only supported
+            when specifying an index expression. You will get an error if you specify
+            index options to the `_resolve/cluster` API endpoint that takes no index
+            expression.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-        :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored
-            when frozen. Defaults to false.
+            as `open,hidden`. NOTE: This option is only supported when specifying an
+            index expression. You will get an error if you specify index options to the
+            `_resolve/cluster` API endpoint that takes no index expression.
+        :param ignore_throttled: If true, concrete, expanded, or aliased indices are
+            ignored when frozen. NOTE: This option is only supported when specifying
+            an index expression. You will get an error if you specify index options to
+            the `_resolve/cluster` API endpoint that takes no index expression.
         :param ignore_unavailable: If false, the request returns an error if it targets
-            a missing or closed index. Defaults to false.
+            a missing or closed index. NOTE: This option is only supported when specifying
+            an index expression. You will get an error if you specify index options to
+            the `_resolve/cluster` API endpoint that takes no index expression.
+        :param timeout: The maximum time to wait for remote clusters to respond. If a
+            remote cluster does not respond within this timeout period, the API response
+            will show the cluster as not connected and include an error message that
+            the request timed out. The default timeout is unset and the query can take
+            as long as the networking layer is configured to wait for remote clusters
+            that are not responding (typically 30 seconds).
         """
-        if name in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'name'")
-        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
-        __path = f'/_resolve/cluster/{__path_parts["name"]}'
+        __path_parts: t.Dict[str, str]
+        if name not in SKIP_IN_PATH:
+            __path_parts = {"name": _quote(name)}
+            __path = f'/_resolve/cluster/{__path_parts["name"]}'
+        else:
+            __path_parts = {}
+            __path = "/_resolve/cluster"
         __query: t.Dict[str, t.Any] = {}
         if allow_no_indices is not None:
             __query["allow_no_indices"] = allow_no_indices
@@ -4148,6 +5025,8 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -4176,7 +5055,18 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
+        mode: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str, t.Literal["logsdb", "lookup", "standard", "time_series"]
+                    ]
+                ],
+                t.Union[str, t.Literal["logsdb", "lookup", "standard", "time_series"]],
+            ]
+        ] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4186,7 +5076,7 @@ class IndicesClient(NamespacedClient):
           Multiple patterns and remote clusters are supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-index-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index>`_
 
         :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases,
             and data streams to resolve. Resources on remote clusters can be specified
@@ -4199,9 +5089,15 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
+        :param mode: Filter indices by index mode, for example `standard`, `lookup`,
+            or `time_series`. Accepts a comma-separated list of index modes; an empty
+            value means no filter is applied.
+        :param project_routing: Specifies a subset of projects to target using project
+            metadata tags in a subset of Lucene query syntax. Allowed Lucene queries:
+            the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless
+            only.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -4220,8 +5116,12 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
+        if mode is not None:
+            __query["mode"] = mode
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -4246,6 +5146,7 @@ class IndicesClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        lazy: t.Optional[bool] = None,
         mappings: t.Optional[t.Mapping[str, t.Any]] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
@@ -4260,7 +5161,7 @@ class IndicesClient(NamespacedClient):
         .. raw:: html
 
           <p>Roll over to a new index.
-          TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.</p>
+          TIP: We recommend using the index lifecycle rollover action to automate rollovers. However, Serverless does not support Index Lifecycle Management (ILM), so don't use this approach in the Serverless context.</p>
           <p>The rollover API creates a new index for a data stream or index alias.
           The API behavior depends on the rollover target.</p>
           <p><strong>Roll over a data stream</strong></p>
@@ -4287,7 +5188,7 @@ class IndicesClient(NamespacedClient):
           If you roll over the alias on May 7, 2099, the new index's name is <code>my-index-2099.05.07-000002</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-rollover-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover>`_
 
         :param alias: Name of the data stream or index alias to roll over.
         :param new_index: Name of the index to create. Supports date math. Data streams
@@ -4302,6 +5203,9 @@ class IndicesClient(NamespacedClient):
             conditions are satisfied.
         :param dry_run: If `true`, checks whether the current index satisfies the specified
             conditions but does not perform a rollover.
+        :param lazy: If set to true, the rollover action will only mark a data stream
+            to signal that it needs to be rolled over at the next write. Only allowed
+            on data streams.
         :param mappings: Mapping for fields in the index. If specified, this mapping
             can include field names, field data types, and mapping parameters.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -4336,6 +5240,8 @@ class IndicesClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if lazy is not None:
+            __query["lazy"] = lazy
         if master_timeout is not None:
             __query["master_timeout"] = master_timeout
         if pretty is not None:
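A sketch of a lazy rollover using the new flag; the data stream name is hypothetical and `es` is an assumed AsyncElasticsearch client.

from elasticsearch import AsyncElasticsearch


async def mark_for_rollover(es: AsyncElasticsearch) -> None:
    # lazy=True only marks the data stream so it rolls over on the next write,
    # instead of rolling over immediately; it is only allowed on data streams.
    resp = await es.indices.rollover(
        alias="my-data-stream",  # hypothetical data stream
        lazy=True,
    )
    print(resp)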
@@ -4387,7 +5293,6 @@ class IndicesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        verbose: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4397,7 +5302,7 @@ class IndicesClient(NamespacedClient):
           For data streams, the API returns information about the stream's backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -4408,10 +5313,9 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
-        :param verbose: If `true`, the request returns a verbose response.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH:
@@ -4435,8 +5339,6 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
-        if verbose is not None:
-            __query["verbose"] = verbose
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -4489,7 +5391,7 @@ class IndicesClient(NamespacedClient):
           <p>By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores>`_
 
         :param index: List of data streams, indices, and aliases used to limit the request.
         :param allow_no_indices: If false, the request returns an error if any wildcard
@@ -4591,7 +5493,7 @@ class IndicesClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shrink-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink>`_
 
         :param index: Name of the source index to shrink.
         :param target: Name of the target index to create.
@@ -4636,11 +5538,7 @@ class IndicesClient(NamespacedClient):
                 __body["aliases"] = aliases
             if settings is not None:
                 __body["settings"] = settings
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -4651,15 +5549,21 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
-    @_rewrite_parameters()
+    @_rewrite_parameters(
+        body_name="index_template",
+    )
     async def simulate_index_template(
         self,
         *,
         name: str,
+        cause: t.Optional[str] = None,
+        create: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         include_defaults: t.Optional[bool] = None,
+        index_template: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -4670,20 +5574,31 @@ class IndicesClient(NamespacedClient):
           Get the index configuration that would be applied to the specified index from an existing index template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template>`_
 
         :param name: Name of the index to simulate
+        :param cause: User-defined reason for dry-run creating the new template, for
+            simulation purposes
+        :param create: Whether the index template optionally provided in the body should
+            only be dry-run added if it is new, or may also replace an existing one
         :param include_defaults: If true, returns all relevant default configurations
             for the index template.
+        :param index_template: The index template definition to simulate, provided in
+            the request body.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
+        if index_template is not None and body is not None:
+            raise ValueError("Cannot set both 'index_template' and 'body'")
         __path_parts: t.Dict[str, str] = {"name": _quote(name)}
         __path = f'/_index_template/_simulate_index/{__path_parts["name"]}'
         __query: t.Dict[str, t.Any] = {}
+        if cause is not None:
+            __query["cause"] = cause
+        if create is not None:
+            __query["create"] = create
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -4696,12 +5611,18 @@ class IndicesClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        __body = index_template if index_template is not None else body
+        if not __body:
+            __body = None
         __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
             params=__query,
             headers=__headers,
+            body=__body,
             endpoint_id="indices.simulate_index_template",
             path_parts=__path_parts,
         )
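A hedged usage sketch of the new request-body support for `simulate_index_template`; the template contents and names below are placeholders, not taken from the diff:

    # Preview the configuration "my-index-000001" would get from this in-flight template.
    resp = await client.indices.simulate_index_template(
        name="my-index-000001",
        index_template={
            "index_patterns": ["my-index-*"],
            "template": {"settings": {"number_of_shards": 1}},
        },
        create=True,
        cause="docs example dry run",
    )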
@@ -4726,6 +5647,7 @@ class IndicesClient(NamespacedClient):
         *,
         name: t.Optional[str] = None,
         allow_auto_create: t.Optional[bool] = None,
+        cause: t.Optional[str] = None,
         composed_of: t.Optional[t.Sequence[str]] = None,
         create: t.Optional[bool] = None,
         data_stream: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -4751,7 +5673,7 @@ class IndicesClient(NamespacedClient):
           Get the index configuration that would be applied by a particular index template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template>`_
 
         :param name: Name of the index template to simulate. To test a template configuration
             before you add it to the cluster, omit this parameter and specify the template
@@ -4762,6 +5684,8 @@ class IndicesClient(NamespacedClient):
             via `actions.auto_create_index`. If set to `false`, then indices or data
             streams matching the template must always be explicitly created, and may
             never be automatically created.
+        :param cause: User-defined reason for dry-run creating the new template, for
+            simulation purposes
         :param composed_of: An ordered list of component template names. Component templates
             are merged in the order specified, meaning that the last component template
             specified has the highest precedence.
@@ -4806,6 +5730,8 @@ class IndicesClient(NamespacedClient):
             __path = "/_index_template/_simulate"
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if cause is not None:
+            __query["cause"] = cause
         if create is not None:
             __query["create"] = create
         if error_trace is not None:
@@ -4919,7 +5845,7 @@ class IndicesClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-split-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split>`_
 
         :param index: Name of the source index to split.
         :param target: Name of the target index to create.
@@ -4964,11 +5890,7 @@ class IndicesClient(NamespacedClient):
                 __body["aliases"] = aliases
             if settings is not None:
                 __body["settings"] = settings
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -5021,7 +5943,7 @@ class IndicesClient(NamespacedClient):
           Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats>`_
 
         :param index: A comma-separated list of index names; use `_all` or empty string
             to perform the operation on all indices
@@ -5045,8 +5967,8 @@ class IndicesClient(NamespacedClient):
             are requested).
         :param include_unloaded_segments: If true, the response includes information
             from segments that are not loaded into memory.
-        :param level: Indicates whether statistics are aggregated at the cluster, index,
-            or shard level.
+        :param level: Indicates whether statistics are aggregated at the cluster, indices,
+            or shards level.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH:
@@ -5098,92 +6020,6 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
-    @_rewrite_parameters()
-    async def unfreeze(
-        self,
-        *,
-        index: str,
-        allow_no_indices: t.Optional[bool] = None,
-        error_trace: t.Optional[bool] = None,
-        expand_wildcards: t.Optional[
-            t.Union[
-                t.Sequence[
-                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
-                ],
-                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
-            ]
-        ] = None,
-        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        human: t.Optional[bool] = None,
-        ignore_unavailable: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        wait_for_active_shards: t.Optional[str] = None,
-    ) -> ObjectApiResponse[t.Any]:
-        """
-        .. raw:: html
-
-          <p>Unfreeze an index.
-          When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html>`_
-
-        :param index: Identifier for the index.
-        :param allow_no_indices: If `false`, the request returns an error if any wildcard
-            expression, index alias, or `_all` value targets only missing or closed indices.
-            This behavior applies even if the request targets other open indices.
-        :param expand_wildcards: Type of index that wildcard patterns can match. If the
-            request can target data streams, this argument determines whether wildcard
-            expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-        :param ignore_unavailable: If `false`, the request returns an error if it targets
-            a missing or closed index.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
-        :param wait_for_active_shards: The number of shard copies that must be active
-            before proceeding with the operation. Set to `all` or any positive integer
-            up to the total number of shards in the index (`number_of_replicas+1`).
-        """
-        if index in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'index'")
-        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
-        __path = f'/{__path_parts["index"]}/_unfreeze'
-        __query: t.Dict[str, t.Any] = {}
-        if allow_no_indices is not None:
-            __query["allow_no_indices"] = allow_no_indices
-        if error_trace is not None:
-            __query["error_trace"] = error_trace
-        if expand_wildcards is not None:
-            __query["expand_wildcards"] = expand_wildcards
-        if filter_path is not None:
-            __query["filter_path"] = filter_path
-        if human is not None:
-            __query["human"] = human
-        if ignore_unavailable is not None:
-            __query["ignore_unavailable"] = ignore_unavailable
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
-        if pretty is not None:
-            __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
-        if wait_for_active_shards is not None:
-            __query["wait_for_active_shards"] = wait_for_active_shards
-        __headers = {"accept": "application/json"}
-        return await self.perform_request(  # type: ignore[return-value]
-            "POST",
-            __path,
-            params=__query,
-            headers=__headers,
-            endpoint_id="indices.unfreeze",
-            path_parts=__path_parts,
-        )
-
     @_rewrite_parameters(
         body_fields=("actions",),
     )
@@ -5206,7 +6042,7 @@ class IndicesClient(NamespacedClient):
           Adds a data stream or index to an alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases>`_
 
         :param actions: Actions to perform.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -5285,7 +6121,7 @@ class IndicesClient(NamespacedClient):
           Validates a query without running it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-validate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases to search.
             Supports wildcards (`*`). To search all data streams or indices, omit this
@@ -5298,15 +6134,15 @@ class IndicesClient(NamespacedClient):
         :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed.
         :param analyzer: Analyzer to use for the query string. This parameter can only
             be used when the `q` query string parameter is specified.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`.
+        :param default_operator: The default operator for query string query: `and` or
+            `or`.
         :param df: Field to use as default where no field prefix is given in the query
             string. This parameter can only be used when the `q` query string parameter
             is specified.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param explain: If `true`, the response returns detailed information if an error
             has occurred.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
diff -pruN 8.17.2-2/elasticsearch/_async/client/inference.py 9.2.0-1/elasticsearch/_async/client/inference.py
--- 8.17.2-2/elasticsearch/_async/client/inference.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/inference.py	2025-10-28 16:50:53.000000000 +0000
@@ -25,6 +25,70 @@ from .utils import SKIP_IN_PATH, _quote,
 
 class InferenceClient(NamespacedClient):
 
+    @_rewrite_parameters(
+        body_fields=("input", "task_settings"),
+    )
+    async def completion(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform completion inference on the service</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The inference Id
+        :param input: Inference input. Either a string or an array of strings.
+        :param task_settings: Optional task settings
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/completion/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.completion",
+            path_parts=__path_parts,
+        )
+
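As a rough usage sketch of the new task-specific helper (the endpoint id is hypothetical and must already exist):

    # Run a completion task against an existing inference endpoint.
    resp = await client.inference.completion(
        inference_id="my-completion-endpoint",
        input="Summarize the rollover API in one sentence.",
        timeout="30s",
    )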
     @_rewrite_parameters()
     async def delete(
         self,
@@ -33,7 +97,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         dry_run: t.Optional[bool] = None,
@@ -49,14 +119,14 @@ class InferenceClient(NamespacedClient):
           <p>Delete an inference endpoint</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete>`_
 
-        :param inference_id: The inference Id
+        :param inference_id: The inference identifier.
         :param task_type: The task type
-        :param dry_run: When true, the endpoint is not deleted, and a list of ingest
-            processors which reference this endpoint is returned
+        :param dry_run: When true, the endpoint is not deleted and a list of ingest processors
+            which reference this endpoint is returned.
         :param force: When true, the inference endpoint is forcefully deleted even if
-            it is still being used by ingest processors or semantic text fields
+            it is still being used by ingest processors or semantic text fields.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -102,7 +172,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         inference_id: t.Optional[str] = None,
@@ -117,7 +193,7 @@ class InferenceClient(NamespacedClient):
           <p>Get an inference endpoint</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get>`_
 
         :param task_type: The task type
         :param inference_id: The inference Id
@@ -155,7 +231,7 @@ class InferenceClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("input", "query", "task_settings"),
+        body_fields=("input", "input_type", "query", "task_settings"),
     )
     async def inference(
         self,
@@ -165,12 +241,19 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        input_type: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         query: t.Optional[str] = None,
         task_settings: t.Optional[t.Any] = None,
@@ -180,18 +263,39 @@ class InferenceClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Perform inference on the service</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/post-inference-api.html>`_
-
-        :param inference_id: The inference Id
-        :param input: Inference input. Either a string or an array of strings.
-        :param task_type: The task type
-        :param query: Query input, required for rerank task. Not required for other tasks.
-        :param task_settings: Optional task settings
-        :param timeout: Specifies the amount of time to wait for the inference request
-            to complete.
+          <p>Perform inference on the service.</p>
+          <p>This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+          It returns a response with the results of the tasks.
+          The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.</p>
+          <p>For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.</p>
+          <blockquote>
+          <p>info
+          The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+          </blockquote>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The unique identifier for the inference endpoint.
+        :param input: The text on which you want to perform the inference task. It can
+            be a single string or an array. > info > Inference endpoints for the `completion`
+            task type currently only support a single string as input.
+        :param task_type: The type of inference task that the model performs.
+        :param input_type: Specifies the input data type for the text embedding model.
+            The `input_type` parameter only applies to Inference Endpoints with the `text_embedding`
+            task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION`
+            * `CLUSTERING` Not all services support all values. Unsupported values will
+            trigger a validation exception. Accepted values depend on the configured
+            inference service, refer to the relevant service-specific documentation for
+            more info. > info > The `input_type` parameter specified on the root level
+            of the request body will take precedence over the `input_type` parameter
+            specified in `task_settings`.
+        :param query: The query input, which is required only for the `rerank` task.
+            It is not required for other tasks.
+        :param task_settings: Task settings for the individual inference request. These
+            settings are specific to the task type you specified and override the task
+            settings specified when initializing the service.
+        :param timeout: The amount of time to wait for the inference request to complete.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -224,15 +328,13 @@ class InferenceClient(NamespacedClient):
         if not __body:
             if input is not None:
                 __body["input"] = input
+            if input_type is not None:
+                __body["input_type"] = input_type
             if query is not None:
                 __body["query"] = query
             if task_settings is not None:
                 __body["task_settings"] = task_settings
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
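A minimal sketch of the widened `inference` signature, assuming a `text_embedding` endpoint named `my-embeddings` that honors the `input_type` hint:

    resp = await client.inference.inference(
        inference_id="my-embeddings",
        task_type="text_embedding",
        input=["first passage", "second passage"],
        input_type="SEARCH",  # takes precedence over any input_type in task_settings
    )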
@@ -255,33 +357,61 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Create an inference endpoint.
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+          <p>Create an inference endpoint.</p>
           <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
           For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
           However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+          <p>The following integrations are available through the inference API. You can find the available task types next to the integration name:</p>
+          <ul>
+          <li>AI21 (<code>chat_completion</code>, <code>completion</code>)</li>
+          <li>AlibabaCloud AI Search (<code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
+          <li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Amazon SageMaker (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
+          <li>Anthropic (<code>completion</code>)</li>
+          <li>Azure AI Studio (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Cohere (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>DeepSeek (<code>chat_completion</code>, <code>completion</code>)</li>
+          <li>Elasticsearch (<code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code> - this service is for built-in models and models uploaded through Eland)</li>
+          <li>ELSER (<code>sparse_embedding</code>)</li>
+          <li>Google AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Google Vertex AI (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Hugging Face (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>JinaAI (<code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Llama (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Mistral (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+          <li>OpenAI (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+          <li>VoyageAI (<code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Watsonx inference integration (<code>text_embedding</code>)</li>
+          </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put>`_
 
         :param inference_id: The inference Id
         :param inference_config:
-        :param task_type: The task type
+        :param task_type: The task type. Refer to the integration list in the API description
+            for the available task types.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -312,6 +442,8 @@ class InferenceClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __body = inference_config if inference_config is not None else body
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
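For reference, a hedged example of creating an endpoint with the new `timeout` query parameter; the ELSER settings shown are illustrative:

    resp = await client.inference.put(
        inference_id="my-elser-endpoint",
        task_type="sparse_embedding",
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
        timeout="60s",
    )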
@@ -325,6 +457,2288 @@ class InferenceClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
+        body_fields=("service", "service_settings"),
+    )
+    async def put_ai21(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["chat_completion", "completion"]],
+        ai21_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["ai21"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an AI21 inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>ai21</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param ai21_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `ai21`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `ai21` service.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if ai21_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'ai21_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "ai21_inference_id": _quote(ai21_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["ai21_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_ai21",
+            path_parts=__path_parts,
+        )
+
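A terse sketch of the generated AI21 helper; the service_settings keys are placeholders, not confirmed by the diff:

    resp = await client.inference.put_ai21(
        task_type="chat_completion",
        ai21_inference_id="my-ai21-endpoint",
        service="ai21",
        service_settings={"api_key": "<key>", "model_id": "<model>"},  # placeholder keys
    )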
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_alibabacloud(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
+        ],
+        alibabacloud_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an AlibabaCloud AI Search inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>alibabacloud-ai-search</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param alibabacloud_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `alibabacloud-ai-search`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `alibabacloud-ai-search` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if alibabacloud_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'alibabacloud_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "alibabacloud_inference_id": _quote(alibabacloud_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_alibabacloud",
+            path_parts=__path_parts,
+        )
+
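Likewise for AlibabaCloud AI Search, sketched here with a rerank endpoint and placeholder settings:

    resp = await client.inference.put_alibabacloud(
        task_type="rerank",
        alibabacloud_inference_id="my-alibabacloud-rerank",
        service="alibabacloud-ai-search",
        service_settings={  # placeholder keys; consult the service documentation
            "api_key": "<key>",
            "host": "<host>",
            "service_id": "<id>",
            "workspace": "<workspace>",
        },
    )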
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_amazonbedrock(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        amazonbedrock_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Amazon Bedrock inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
+          <blockquote>
+          <p>info
+          You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.</p>
+          </blockquote>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param amazonbedrock_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `amazonbedrock`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `amazonbedrock` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if amazonbedrock_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'amazonbedrock_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_amazonbedrock",
+            path_parts=__path_parts,
+        )
+
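A sketch of the Amazon Bedrock helper; per the note above, the key pair is supplied only at creation time (key names below are placeholders):

    resp = await client.inference.put_amazonbedrock(
        task_type="text_embedding",
        amazonbedrock_inference_id="my-bedrock-endpoint",
        service="amazonbedrock",
        service_settings={  # placeholder keys; consult the service documentation
            "access_key": "<access-key>",
            "secret_key": "<secret-key>",
            "region": "us-east-1",
        },
    )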
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_amazonsagemaker(
+        self,
+        *,
+        task_type: t.Union[
+            str,
+            t.Literal[
+                "chat_completion",
+                "completion",
+                "rerank",
+                "sparse_embedding",
+                "text_embedding",
+            ],
+        ],
+        amazonsagemaker_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Amazon SageMaker inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>amazon_sagemaker</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param amazonsagemaker_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `amazon_sagemaker`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `amazon_sagemaker` service and `service_settings.api`
+            you specified.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type and `service_settings.api` you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if amazonsagemaker_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'amazonsagemaker_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_amazonsagemaker",
+            path_parts=__path_parts,
+        )
+
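And for Amazon SageMaker, sketched with a chat_completion endpoint; the settings keys are placeholders beyond the `service_settings.api` mentioned in the docstring:

    resp = await client.inference.put_amazonsagemaker(
        task_type="chat_completion",
        amazonsagemaker_inference_id="my-sagemaker-endpoint",
        service="amazon_sagemaker",
        service_settings={"api": "<api>", "endpoint_name": "<endpoint>"},  # placeholder keys
    )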
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_anthropic(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion"]],
+        anthropic_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Anthropic inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>anthropic</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic>`_
+
+        :param task_type: The task type. The only valid task type for the model to perform
+            is `completion`.
+        :param anthropic_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `anthropic`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `anthropic` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if anthropic_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'anthropic_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "anthropic_inference_id": _quote(anthropic_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_anthropic",
+            path_parts=__path_parts,
+        )
+
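Finally, a sketch of the Anthropic helper, which only supports the completion task type (the model id is a placeholder):

    resp = await client.inference.put_anthropic(
        task_type="completion",
        anthropic_inference_id="my-anthropic-endpoint",
        service="anthropic",
        service_settings={"api_key": "<key>", "model_id": "<model>"},  # placeholder keys
    )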
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_azureaistudio(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]],
+        azureaistudio_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Azure AI Studio inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>azureaistudio</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param azureaistudio_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `azureaistudio`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `azureaistudio` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if azureaistudio_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'azureaistudio_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "azureaistudio_inference_id": _quote(azureaistudio_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_azureaistudio",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_azureopenai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        azureopenai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Azure OpenAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>azureopenai</code> service.</p>
+          <p>The list of chat completion models that you can choose from in your Azure OpenAI deployment include:</p>
+          <ul>
+          <li><a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models">GPT-4 and GPT-4 Turbo models</a></li>
+          <li><a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35">GPT-3.5</a></li>
+          </ul>
+          <p>The list of embeddings models that you can choose from in your deployment can be found in the <a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings">Azure models documentation</a>.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+            NOTE: The `chat_completion` task type only supports streaming and only through
+            the _stream API.
+        :param azureopenai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `azureopenai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `azureopenai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if azureopenai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'azureopenai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "azureopenai_inference_id": _quote(azureopenai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_azureopenai",
+            path_parts=__path_parts,
+        )
+
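+    # --- Editorial usage sketch (not part of the generated client) ---
+    # Creating a text_embedding endpoint backed by an Azure OpenAI deployment, as
+    # described in the docstring above (assumes an AsyncElasticsearch client bound to
+    # `client`, as in the sketch after `put_anthropic`). The endpoint id and the
+    # `service_settings` keys are illustrative assumptions; see the linked API
+    # documentation for the exact schema.
+    #
+    #     resp = await client.inference.put_azureopenai(
+    #         task_type="text_embedding",
+    #         azureopenai_inference_id="my-azure-openai-embeddings",
+    #         service="azureopenai",
+    #         service_settings={
+    #             "api_key": "<azure api key>",
+    #             "resource_name": "<azure resource name>",
+    #             "deployment_id": "<deployment of an embeddings model>",
+    #             "api_version": "<api version>",
+    #         },
+    #     )
+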
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_cohere(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]],
+        cohere_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Cohere inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>cohere</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param cohere_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `cohere`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `cohere` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if cohere_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'cohere_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "cohere_inference_id": _quote(cohere_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_cohere",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_contextualai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["rerank"]],
+        contextualai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["contextualai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Contextual AI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>contextualai</code> service.</p>
+          <p>To review the available <code>rerank</code> models, refer to <a href="https://docs.contextual.ai/api-reference/rerank/rerank#body-model">https://docs.contextual.ai/api-reference/rerank/rerank#body-model</a>.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param contextualai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `contextualai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `contextualai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if contextualai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'contextualai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "contextualai_inference_id": _quote(contextualai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["contextualai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_contextualai",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_custom(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
+        ],
+        custom_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["custom"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a custom inference endpoint.</p>
+          <p>The custom service gives you more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations.
+          It lets you define the headers, URL, query parameters, request body, and secrets.
+          The custom service supports template replacement: you define a template placeholder that is replaced with the value associated with that key.
+          Templates are portions of a string that start with <code>${</code> and end with <code>}</code>.
+          The parameters <code>secret_parameters</code> and <code>task_settings</code> are checked for keys for template replacement. Template replacement is supported in the <code>request</code>, <code>headers</code>, <code>url</code>, and <code>query_parameters</code>.
+          If the definition (key) for a template is not found, an error message is returned.
+          For example, given an endpoint definition like the following:</p>
+          <pre><code>PUT _inference/text_embedding/test-text-embedding
+          {
+            &quot;service&quot;: &quot;custom&quot;,
+            &quot;service_settings&quot;: {
+               &quot;secret_parameters&quot;: {
+                    &quot;api_key&quot;: &quot;&lt;some api key&gt;&quot;
+               },
+               &quot;url&quot;: &quot;...endpoints.huggingface.cloud/v1/embeddings&quot;,
+               &quot;headers&quot;: {
+                   &quot;Authorization&quot;: &quot;Bearer ${api_key}&quot;,
+                   &quot;Content-Type&quot;: &quot;application/json&quot;
+               },
+               &quot;request&quot;: &quot;{\\&quot;input\\&quot;: ${input}}&quot;,
+               &quot;response&quot;: {
+                   &quot;json_parser&quot;: {
+                       &quot;text_embeddings&quot;:&quot;$.data[*].embedding[*]&quot;
+                   }
+               }
+            }
+          }
+          </code></pre>
+          <p>To replace <code>${api_key}</code>, the <code>secret_parameters</code> and <code>task_settings</code> are checked for a key named <code>api_key</code>.</p>
+          <blockquote>
+          <p>info
+          Templates should not be surrounded by quotes.</p>
+          </blockquote>
+          <p>Pre-defined templates:</p>
+          <ul>
+          <li><code>${input}</code> refers to the array of input strings that comes from the <code>input</code> field of the subsequent inference requests.</li>
+          <li><code>${input_type}</code> refers to the input type translation values.</li>
+          <li><code>${query}</code> refers to the query field used specifically for reranking tasks.</li>
+          <li><code>${top_n}</code> refers to the <code>top_n</code> field available when performing rerank requests.</li>
+          <li><code>${return_documents}</code> refers to the <code>return_documents</code> field available when performing rerank requests.</li>
+          </ul>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param custom_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `custom`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `custom` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if custom_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'custom_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "custom_inference_id": _quote(custom_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["custom_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_custom",
+            path_parts=__path_parts,
+        )
+
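+    # --- Editorial usage sketch (not part of the generated client) ---
+    # The docstring above shows the custom endpoint definition as a raw PUT request;
+    # this is the same definition expressed through `put_custom` (assumes an
+    # AsyncElasticsearch client bound to `client`). The URL is elided in the original
+    # example and is kept elided here.
+    #
+    #     resp = await client.inference.put_custom(
+    #         task_type="text_embedding",
+    #         custom_inference_id="test-text-embedding",
+    #         service="custom",
+    #         service_settings={
+    #             "secret_parameters": {"api_key": "<some api key>"},
+    #             "url": "...endpoints.huggingface.cloud/v1/embeddings",
+    #             "headers": {
+    #                 "Authorization": "Bearer ${api_key}",
+    #                 "Content-Type": "application/json",
+    #             },
+    #             "request": '{"input": ${input}}',
+    #             "response": {
+    #                 "json_parser": {"text_embeddings": "$.data[*].embedding[*]"}
+    #             },
+    #         },
+    #     )
+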
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    async def put_deepseek(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["chat_completion", "completion"]],
+        deepseek_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a DeepSeek inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>deepseek</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param deepseek_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `deepseek`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `deepseek` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if deepseek_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'deepseek_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "deepseek_inference_id": _quote(deepseek_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["deepseek_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_deepseek",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_elasticsearch(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["rerank", "sparse_embedding", "text_embedding"]
+        ],
+        elasticsearch_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Elasticsearch inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>elasticsearch</code> service.</p>
+          <blockquote>
+          <p>info
+          Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.</p>
+          </blockquote>
+          <p>If you use the ELSER or the E5 model through the <code>elasticsearch</code> service, the API request will automatically download and deploy the model if it isn't downloaded yet.</p>
+          <blockquote>
+          <p>info
+          You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.</p>
+          </blockquote>
+          <p>After creating the endpoint, wait for the model deployment to complete before using it.
+          To verify the deployment status, use the get trained model statistics API.
+          Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param elasticsearch_inference_id: The unique identifier of the inference endpoint.
+            It must not match the `model_id`.
+        :param service: The type of service supported for the specified task type. In
+            this case, `elasticsearch`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `elasticsearch` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if elasticsearch_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'elasticsearch_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "elasticsearch_inference_id": _quote(elasticsearch_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_elasticsearch",
+            path_parts=__path_parts,
+        )
+
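+    # --- Editorial usage sketch (not part of the generated client) ---
+    # Deploying a text_embedding endpoint through the `elasticsearch` service while
+    # raising the request timeout so the model can download in the background, as the
+    # docstring above suggests (assumes an AsyncElasticsearch client bound to
+    # `client`). The endpoint id, model id, and `service_settings` keys shown are
+    # illustrative assumptions; check the linked API documentation for the exact settings.
+    #
+    #     resp = await client.inference.put_elasticsearch(
+    #         task_type="text_embedding",
+    #         elasticsearch_inference_id="my-e5-endpoint",
+    #         service="elasticsearch",
+    #         service_settings={
+    #             "model_id": ".multilingual-e5-small",
+    #             "num_allocations": 1,
+    #             "num_threads": 1,
+    #         },
+    #         timeout="5m",
+    #     )
+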
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    async def put_elser(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["sparse_embedding"]],
+        elser_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["elser"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an ELSER inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>elser</code> service.
+          You can also deploy ELSER by using the Elasticsearch inference integration.</p>
+          <blockquote>
+          <p>info
+          Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.</p>
+          </blockquote>
+          <p>The API request will automatically download and deploy the ELSER model if it isn't already downloaded.</p>
+          <blockquote>
+          <p>info
+          You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.</p>
+          </blockquote>
+          <p>After creating the endpoint, wait for the model deployment to complete before using it.
+          To verify the deployment status, use the get trained model statistics API.
+          Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param elser_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `elser`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `elser` service.
+        :param chunking_settings: The chunking configuration object. Note that for ELSER
+            endpoints, the `max_chunk_size` may not exceed `300`.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if elser_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'elser_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "elser_inference_id": _quote(elser_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_elser",
+            path_parts=__path_parts,
+        )
+
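+    # --- Editorial usage sketch (not part of the generated client) ---
+    # Creating an ELSER sparse_embedding endpoint with an explicit chunking
+    # configuration that respects the `max_chunk_size` limit of 300 noted in the
+    # docstring above (assumes an AsyncElasticsearch client bound to `client`). The
+    # endpoint id and the `service_settings`/`chunking_settings` keys are illustrative
+    # assumptions.
+    #
+    #     resp = await client.inference.put_elser(
+    #         task_type="sparse_embedding",
+    #         elser_inference_id="my-elser-endpoint",
+    #         service="elser",
+    #         service_settings={"num_allocations": 1, "num_threads": 1},
+    #         chunking_settings={"strategy": "sentence", "max_chunk_size": 250},
+    #         timeout="5m",
+    #     )
+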
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    async def put_googleaistudio(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        googleaistudio_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Google AI Studio inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>googleaistudio</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param googleaistudio_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `googleaistudio`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `googleaistudio` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if googleaistudio_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'googleaistudio_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "googleaistudio_inference_id": _quote(googleaistudio_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_googleaistudio",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_googlevertexai(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"]
+        ],
+        googlevertexai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Google Vertex AI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>googlevertexai</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param googlevertexai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `googlevertexai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `googlevertexai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if googlevertexai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'googlevertexai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "googlevertexai_inference_id": _quote(googlevertexai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_googlevertexai",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_hugging_face(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"]
+        ],
+        huggingface_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Hugging Face inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>hugging_face</code> service.
+          Supported tasks include: <code>text_embedding</code>, <code>completion</code>, and <code>chat_completion</code>.</p>
+          <p>To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint.
+          Select a model that supports the task you intend to use.</p>
+          <p>For Elastic's <code>text_embedding</code> task:
+          The selected model must support the <code>Sentence Embeddings</code> task. On the new endpoint creation page, select the <code>Sentence Embeddings</code> task under the <code>Advanced Configuration</code> section.
+          After the endpoint has initialized, copy the generated endpoint URL.
+          Recommended models for <code>text_embedding</code> task:</p>
+          <ul>
+          <li><code>all-MiniLM-L6-v2</code></li>
+          <li><code>all-MiniLM-L12-v2</code></li>
+          <li><code>all-mpnet-base-v2</code></li>
+          <li><code>e5-base-v2</code></li>
+          <li><code>e5-small-v2</code></li>
+          <li><code>multilingual-e5-base</code></li>
+          <li><code>multilingual-e5-small</code></li>
+          </ul>
+          <p>For Elastic's <code>chat_completion</code> and <code>completion</code> tasks:
+          The selected model must support the <code>Text Generation</code> task and expose the OpenAI API. Hugging Face supports both serverless and dedicated endpoints for <code>Text Generation</code>. When creating a dedicated endpoint, select the <code>Text Generation</code> task.
+          After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and that the URL includes the <code>/v1/chat/completions</code> path. Then, copy the full endpoint URL for use.
+          Recommended models for <code>chat_completion</code> and <code>completion</code> tasks:</p>
+          <ul>
+          <li><code>Mistral-7B-Instruct-v0.2</code></li>
+          <li><code>QwQ-32B</code></li>
+          <li><code>Phi-3-mini-128k-instruct</code></li>
+          </ul>
+          <p>For Elastic's <code>rerank</code> task:
+          The selected model must support the <code>sentence-ranking</code> task and expose the OpenAI API.
+          Hugging Face supports only dedicated (not serverless) endpoints for <code>Rerank</code> so far.
+          After the endpoint is initialized, copy the full endpoint URL for use.
+          Tested models for <code>rerank</code> task:</p>
+          <ul>
+          <li><code>bge-reranker-base</code></li>
+          <li><code>jina-reranker-v1-turbo-en-GGUF</code></li>
+          </ul>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param huggingface_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `hugging_face`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `hugging_face` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if huggingface_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'huggingface_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "huggingface_inference_id": _quote(huggingface_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_hugging_face",
+            path_parts=__path_parts,
+        )
+
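+    # --- Editorial usage sketch (not part of the generated client) ---
+    # Registering a Hugging Face Inference Endpoint for the text_embedding task, using
+    # the endpoint URL copied from the Hugging Face console as the docstring above
+    # describes (assumes an AsyncElasticsearch client bound to `client`). The endpoint
+    # id and the `service_settings` keys (`url`, `api_key`) are illustrative assumptions.
+    #
+    #     resp = await client.inference.put_hugging_face(
+    #         task_type="text_embedding",
+    #         huggingface_inference_id="my-hf-embeddings",
+    #         service="hugging_face",
+    #         service_settings={
+    #             "url": "https://<your-endpoint>.endpoints.huggingface.cloud",
+    #             "api_key": "<hugging face access token>",
+    #         },
+    #     )
+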
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_jinaai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+        jinaai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a JinaAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>jinaai</code> service.</p>
+          <p>To review the available <code>rerank</code> models, refer to <a href="https://jina.ai/reranker">https://jina.ai/reranker</a>.
+          To review the available <code>text_embedding</code> models, refer to <a href="https://jina.ai/embeddings/">https://jina.ai/embeddings/</a>.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param jinaai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `jinaai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `jinaai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if jinaai_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'jinaai_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "jinaai_inference_id": _quote(jinaai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_jinaai",
+            path_parts=__path_parts,
+        )
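A minimal usage sketch for the put_jinaai coroutine above (not part of the diff), assuming elasticsearch-py 9.x with AsyncElasticsearch; the cluster URL, credentials, endpoint id, and service_settings keys are illustrative placeholders rather than values taken from this changeset.

from elasticsearch import AsyncElasticsearch

async def create_jinaai_endpoint() -> None:
    # Placeholder connection details; substitute your own cluster URL and credentials.
    client = AsyncElasticsearch("https://localhost:9200", api_key="<es-api-key>")
    resp = await client.inference.put_jinaai(
        task_type="text_embedding",
        jinaai_inference_id="my-jinaai-embeddings",  # hypothetical endpoint id
        service="jinaai",
        service_settings={
            # Illustrative keys; consult the jinaai service documentation for the exact settings.
            "api_key": "<jina-api-key>",
            "model_id": "jina-embeddings-v3",
        },
    )
    print(resp)
    await client.close()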
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    async def put_llama(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        llama_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["llama"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Llama inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>llama</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-llama>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param llama_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `llama`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `llama` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if llama_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'llama_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "llama_inference_id": _quote(llama_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["llama_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_llama",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    async def put_mistral(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        mistral_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Mistral inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>mistral</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param mistral_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `mistral`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `mistral` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if mistral_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'mistral_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "mistral_inference_id": _quote(mistral_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_mistral",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_openai(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        openai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["openai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an OpenAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>openai</code> service or <code>openai</code>-compatible APIs.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+            NOTE: The `chat_completion` task type only supports streaming and only through
+            the _stream API.
+        :param openai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `openai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `openai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if openai_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'openai_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "openai_inference_id": _quote(openai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_openai",
+            path_parts=__path_parts,
+        )
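A corresponding sketch for put_openai, under the same assumptions as the put_jinaai example earlier (a configured AsyncElasticsearch instance named `client`, awaited inside an async function); the endpoint id and service_settings values are illustrative.

resp = await client.inference.put_openai(
    task_type="text_embedding",
    openai_inference_id="my-openai-embeddings",  # hypothetical endpoint id
    service="openai",
    service_settings={
        # Illustrative settings; the openai service typically takes an API key and a model id.
        "api_key": "<openai-api-key>",
        "model_id": "text-embedding-3-small",
    },
)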
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    async def put_voyageai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+        voyageai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a VoyageAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>voyageai</code> service.</p>
+          <p>Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param voyageai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `voyageai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `voyageai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if voyageai_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'voyageai_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "voyageai_inference_id": _quote(voyageai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_voyageai",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings"),
+    )
+    async def put_watsonx(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        watsonx_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Watsonx inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
+          You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
+          You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param watsonx_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `watsonxai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `watsonxai` service.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if watsonx_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'watsonx_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "watsonx_inference_id": _quote(watsonx_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_watsonx",
+            path_parts=__path_parts,
+        )
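A hedged sketch for put_watsonx under the same client assumption. Per the signature above, this method takes no chunking_settings and the service name is `watsonxai`; the service_settings keys shown are assumptions about that service, not values from this diff.

resp = await client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="my-watsonx-embeddings",  # hypothetical endpoint id
    service="watsonxai",
    service_settings={
        # Assumed keys; verify against the watsonxai inference service documentation.
        "api_key": "<ibm-cloud-api-key>",
        "url": "<watsonx-endpoint-url>",
        "model_id": "<model-id>",
        "project_id": "<project-id>",
        "api_version": "<api-version>",
    },
)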
+
+    @_rewrite_parameters(
+        body_fields=("input", "query", "task_settings"),
+    )
+    async def rerank(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        query: t.Optional[str] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform reranking inference on the service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The unique identifier for the inference endpoint.
+        :param input: The text on which you want to perform the inference task. It can
+            be a single string or an array. NOTE: Inference endpoints for the `completion`
+            task type currently only support a single string as input.
+        :param query: Query input.
+        :param task_settings: Task settings for the individual inference request. These
+            settings are specific to the task type you specified and override the task
+            settings specified when initializing the service.
+        :param timeout: The amount of time to wait for the inference request to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        if query is None and body is None:
+            raise ValueError("Empty value passed for parameter 'query'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/rerank/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if query is not None:
+                __body["query"] = query
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.rerank",
+            path_parts=__path_parts,
+        )
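Usage sketch for the rerank helper, again assuming a configured AsyncElasticsearch `client` and an existing rerank endpoint; the endpoint id below is hypothetical.

resp = await client.inference.rerank(
    inference_id="my-rerank-endpoint",  # hypothetical, e.g. one created with put_jinaai above
    query="What is the capital of France?",
    input=[
        "Paris is the capital of France.",
        "Berlin is the capital of Germany.",
    ],
)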
+
+    @_rewrite_parameters(
+        body_fields=("input", "task_settings"),
+    )
+    async def sparse_embedding(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform sparse embedding inference on the service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The inference ID
+        :param input: Inference input. Either a string or an array of strings.
+        :param task_settings: Optional task settings
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.sparse_embedding",
+            path_parts=__path_parts,
+        )
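The equivalent sparse_embedding call, under the same assumptions; the endpoint id is a hypothetical sparse-embedding endpoint (for example one backed by ELSER).

resp = await client.inference.sparse_embedding(
    inference_id="my-elser-endpoint",  # hypothetical endpoint id
    input="The quick brown fox jumps over the lazy dog",
)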
+
+    @_rewrite_parameters(
+        body_fields=("input", "input_type", "task_settings"),
+    )
+    async def text_embedding(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        input_type: t.Optional[str] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform text embedding inference on the service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The inference ID
+        :param input: Inference input. Either a string or an array of strings.
+        :param input_type: The input data type for the text embedding model. Possible
+            values include `SEARCH`, `INGEST`, `CLASSIFICATION`, and `CLUSTERING`. Not
+            all services support all values; unsupported values will trigger a validation
+            exception. Accepted values depend on the configured inference service; refer
+            to the relevant service-specific documentation for more info. NOTE: The
+            `input_type` parameter specified on the root level of the request body will
+            take precedence over the `input_type` parameter specified in `task_settings`.
+        :param task_settings: Optional task settings
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if input_type is not None:
+                __body["input_type"] = input_type
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.text_embedding",
+            path_parts=__path_parts,
+        )
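A text_embedding sketch that also exercises the top-level input_type parameter documented above; same client assumption, illustrative endpoint id.

resp = await client.inference.text_embedding(
    inference_id="my-openai-embeddings",  # hypothetical endpoint id created earlier
    input=["first passage to embed", "second passage to embed"],
    input_type="SEARCH",  # one of SEARCH, INGEST, CLASSIFICATION, CLUSTERING; support is service-dependent
)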
+
+    @_rewrite_parameters(
         body_name="inference_config",
     )
     async def update(
@@ -336,7 +2750,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
@@ -354,7 +2774,7 @@ class InferenceClient(NamespacedClient):
           However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update>`_
 
         :param inference_id: The unique identifier of the inference endpoint.
         :param inference_config:
@@ -392,7 +2812,7 @@ class InferenceClient(NamespacedClient):
         __body = inference_config if inference_config is not None else body
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
-            "POST",
+            "PUT",
             __path,
             params=__query,
             headers=__headers,
diff -pruN 8.17.2-2/elasticsearch/_async/client/ingest.py 9.2.0-1/elasticsearch/_async/client/ingest.py
--- 8.17.2-2/elasticsearch/_async/client/ingest.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/ingest.py	2025-10-28 16:50:53.000000000 +0000
@@ -40,18 +40,18 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete GeoIP database configurations.
-          Delete one or more IP geolocation database configurations.</p>
+          <p>Delete GeoIP database configurations.</p>
+          <p>Delete one or more IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-geoip-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database>`_
 
         :param id: A comma-separated list of geoip database configurations to delete
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
@@ -98,7 +98,7 @@ class IngestClient(NamespacedClient):
           <p>Delete IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-ip-location-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database>`_
 
         :param id: A comma-separated list of IP location database configurations.
         :param master_timeout: The period to wait for a connection to the master node.
@@ -155,7 +155,7 @@ class IngestClient(NamespacedClient):
           Delete one or more ingest pipelines.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline>`_
 
         :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the
             request. To delete all ingest pipelines in a cluster, use a value of `*`.
@@ -208,7 +208,7 @@ class IngestClient(NamespacedClient):
           Get download statistics for GeoIP2 databases that are used with the GeoIP processor.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/geoip-processor.html>`_
+        `<https://www.elastic.co/docs/reference/enrich-processor/geoip-processor>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ingest/geoip/stats"
@@ -239,24 +239,20 @@ class IngestClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get GeoIP database configurations.
-          Get information about one or more IP geolocation database configurations.</p>
+          <p>Get GeoIP database configurations.</p>
+          <p>Get information about one or more IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-geoip-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database>`_
 
-        :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard
-            (`*`) expressions are supported. To get all database configurations, omit
-            this parameter or use `*`.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
+        :param id: A comma-separated list of database configuration IDs to retrieve.
+            Wildcard (`*`) expressions are supported. To get all database configurations,
+            omit this parameter or use `*`.
         """
         __path_parts: t.Dict[str, str]
         if id not in SKIP_IN_PATH:
@@ -272,8 +268,6 @@ class IngestClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -294,7 +288,6 @@ class IngestClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -303,15 +296,11 @@ class IngestClient(NamespacedClient):
           <p>Get IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ip-location-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database>`_
 
         :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard
             (`*`) expressions are supported. To get all database configurations, omit
             this parameter or use `*`.
-        :param master_timeout: The period to wait for a connection to the master node.
-            If no response is received before the timeout expires, the request fails
-            and returns an error. A value of `-1` indicates that the request should never
-            time out.
         """
         __path_parts: t.Dict[str, str]
         if id not in SKIP_IN_PATH:
@@ -327,8 +316,6 @@ class IngestClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -356,12 +343,12 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get pipelines.
-          Get information about one or more ingest pipelines.
+          <p>Get pipelines.</p>
+          <p>Get information about one or more ingest pipelines.
           This API returns a local reference of the pipeline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline>`_
 
         :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions
             are supported. To get all ingest pipelines, omit this parameter or use `*`.
@@ -418,7 +405,7 @@ class IngestClient(NamespacedClient):
           A grok pattern is like a regular expression that supports aliased expressions that can be reused.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/grok-processor.html>`_
+        `<https://www.elastic.co/docs/reference/enrich-processor/grok-processor>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ingest/processor/grok"
@@ -461,11 +448,11 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create or update a GeoIP database configuration.
-          Refer to the create or update IP geolocation database configuration API.</p>
+          <p>Create or update a GeoIP database configuration.</p>
+          <p>Refer to the create or update IP geolocation database configuration API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-geoip-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database>`_
 
         :param id: ID of the database configuration to create or update.
         :param maxmind: The configuration necessary to identify which IP geolocation
@@ -540,7 +527,7 @@ class IngestClient(NamespacedClient):
           <p>Create or update an IP geolocation database configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-ip-location-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database>`_
 
         :param id: The database configuration identifier.
         :param configuration:
@@ -593,6 +580,7 @@ class IngestClient(NamespacedClient):
         body_fields=(
             "deprecated",
             "description",
+            "field_access_pattern",
             "meta",
             "on_failure",
             "processors",
@@ -607,6 +595,9 @@ class IngestClient(NamespacedClient):
         deprecated: t.Optional[bool] = None,
         description: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
+        field_access_pattern: t.Optional[
+            t.Union[str, t.Literal["classic", "flexible"]]
+        ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         if_version: t.Optional[int] = None,
@@ -626,7 +617,7 @@ class IngestClient(NamespacedClient):
           Changes made using this API take effect immediately.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ingest.html>`_
+        `<https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines>`_
 
         :param id: ID of the ingest pipeline to create or update.
         :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated
@@ -634,6 +625,8 @@ class IngestClient(NamespacedClient):
             or updating a non-deprecated index template, Elasticsearch will emit a deprecation
             warning.
         :param description: Description of the ingest pipeline.
+        :param field_access_pattern: Controls how processors in this pipeline should
+            read and write data on a document's source.
         :param if_version: Required version for optimistic concurrency control for pipeline
             updates
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -680,6 +673,8 @@ class IngestClient(NamespacedClient):
                 __body["deprecated"] = deprecated
             if description is not None:
                 __body["description"] = description
+            if field_access_pattern is not None:
+                __body["field_access_pattern"] = field_access_pattern
             if meta is not None:
                 __body["_meta"] = meta
             if on_failure is not None:
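A brief sketch of the new field_access_pattern option on ingest put_pipeline, assuming the same AsyncElasticsearch `client`; the pipeline id and processor are illustrative only.

resp = await client.ingest.put_pipeline(
    id="my-pipeline",  # hypothetical pipeline id
    description="Example pipeline using the flexible field access pattern",
    processors=[{"set": {"field": "event.ingested", "value": "{{_ingest.timestamp}}"}}],
    field_access_pattern="flexible",  # the other accepted value is "classic"
)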
@@ -718,17 +713,17 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Simulate a pipeline.
-          Run an ingest pipeline against a set of provided documents.
+          <p>Simulate a pipeline.</p>
+          <p>Run an ingest pipeline against a set of provided documents.
           You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/simulate-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate>`_
 
         :param docs: Sample documents to test in the pipeline.
-        :param id: Pipeline to test. If you don’t specify a `pipeline` in the request
+        :param id: The pipeline to test. If you don't specify a `pipeline` in the request
             body, this parameter is required.
-        :param pipeline: Pipeline to test. If you don’t specify the `pipeline` request
+        :param pipeline: The pipeline to test. If you don't specify the `pipeline` request
             path parameter, this parameter is required. If you specify both this and
             the request path parameter, the API only uses the request path parameter.
         :param verbose: If `true`, the response includes output data for each processor
diff -pruN 8.17.2-2/elasticsearch/_async/client/license.py 9.2.0-1/elasticsearch/_async/client/license.py
--- 8.17.2-2/elasticsearch/_async/client/license.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/license.py	2025-10-28 16:50:53.000000000 +0000
@@ -32,17 +32,23 @@ class LicenseClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Delete the license.
-          When the license expires, your subscription level reverts to Basic.</p>
+          <p>Delete the license.</p>
+          <p>When the license expires, your subscription level reverts to Basic.</p>
           <p>If the operator privileges feature is enabled, only operator users can use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-license.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete>`_
+
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license"
@@ -53,8 +59,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -79,20 +89,25 @@ class LicenseClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get license information.
-          Get information about your Elastic license including its type, its status, when it was issued, and when it expires.</p>
-          <p>NOTE: If the master node is generating a new cluster state, the get license API may return a <code>404 Not Found</code> response.
+          <p>Get license information.</p>
+          <p>Get information about your Elastic license including its type, its status, when it was issued, and when it expires.</p>
+          <blockquote>
+          <p>info
+          If the master node is generating a new cluster state, the get license API may return a <code>404 Not Found</code> response.
           If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.</p>
+          </blockquote>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-license.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get>`_
 
         :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise
             license types. If `false`, this parameter returns platinum for both platinum
             and enterprise license types. This behavior is maintained for backwards compatibility.
             This parameter is deprecated and will always be set to true in 8.x.
-        :param local: Specifies whether to retrieve local information. The default value
-            is `false`, which means the information is retrieved from the master node.
+        :param local: Specifies whether to retrieve local information. From 9.2 onwards
+            the default value is `true`, which means the information is retrieved from
+            the responding node. In earlier versions the default is `false`, which means
+            the information is retrieved from the elected master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license"
@@ -134,7 +149,7 @@ class LicenseClient(NamespacedClient):
           <p>Get the basic license status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-basic-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/basic_status"
@@ -172,7 +187,7 @@ class LicenseClient(NamespacedClient):
           <p>Get the trial status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-trial-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/trial_status"
@@ -207,14 +222,16 @@ class LicenseClient(NamespacedClient):
         human: t.Optional[bool] = None,
         license: t.Optional[t.Mapping[str, t.Any]] = None,
         licenses: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Update the license.
-          You can update your license at runtime without shutting down your nodes.
+          <p>Update the license.</p>
+          <p>You can update your license at runtime without shutting down your nodes.
           License updates take effect immediately.
           If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.
           You must then re-submit the API request with the acknowledge parameter set to true.</p>
@@ -222,12 +239,15 @@ class LicenseClient(NamespacedClient):
           If the operator privileges feature is enabled, only operator users can use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-license.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post>`_
 
         :param acknowledge: Specifies whether you acknowledge the license changes.
         :param license:
         :param licenses: A sequence of one or more JSON documents containing the license
             information.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license"
@@ -241,8 +261,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         if not __body:
             if license is not None:
                 __body["license"] = license
@@ -271,23 +295,28 @@ class LicenseClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Start a basic license.
-          Start an indefinite basic license, which gives access to all the basic features.</p>
+          <p>Start a basic license.</p>
+          <p>Start an indefinite basic license, which gives access to all the basic features.</p>
           <p>NOTE: In order to start a basic license, you must not currently have a basic license.</p>
           <p>If the basic license does not support all of the features that are available with your current license, however, you are notified in the response.
           You must then re-submit the API request with the <code>acknowledge</code> parameter set to <code>true</code>.</p>
           <p>To check the status of your basic license, use the get basic license API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-basic.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic>`_
 
         :param acknowledge: whether the user has acknowledged acknowledge messages (default:
             false)
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/start_basic"
@@ -300,8 +329,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -320,8 +353,9 @@ class LicenseClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        type_query_string: t.Optional[str] = None,
+        type: t.Optional[str] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -333,11 +367,12 @@ class LicenseClient(NamespacedClient):
           <p>To check the status of your trial, use the get trial status API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial>`_
 
         :param acknowledge: whether the user has acknowledged acknowledge messages (default:
             false)
-        :param type_query_string:
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param type: The type of trial license to generate (default: "trial")
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/start_trial"
@@ -350,10 +385,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if type_query_string is not None:
-            __query["type_query_string"] = type_query_string
+        if type is not None:
+            __query["type"] = type
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
diff -pruN 8.17.2-2/elasticsearch/_async/client/logstash.py 9.2.0-1/elasticsearch/_async/client/logstash.py
--- 8.17.2-2/elasticsearch/_async/client/logstash.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/logstash.py	2025-10-28 16:50:53.000000000 +0000
@@ -43,7 +43,7 @@ class LogstashClient(NamespacedClient):
           If the request succeeds, you receive an empty response with an appropriate status code.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline>`_
 
         :param id: An identifier for the pipeline.
         """
@@ -87,7 +87,7 @@ class LogstashClient(NamespacedClient):
           Get pipelines that are used for Logstash Central Management.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline>`_
 
         :param id: A comma-separated list of pipeline identifiers.
         """
@@ -139,9 +139,11 @@ class LogstashClient(NamespacedClient):
           If the specified pipeline exists, it is replaced.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline>`_
 
-        :param id: An identifier for the pipeline.
+        :param id: An identifier for the pipeline. Pipeline IDs must begin with a letter
+            or underscore and contain only letters, underscores, hyphens, and
+            numbers.
         :param pipeline:
         """
         if id in SKIP_IN_PATH:
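
To illustrate the pipeline ID constraint documented above, a hedged sketch of creating a centrally managed pipeline; the pipeline body fields follow the Logstash central-management document layout as commonly documented and are assumptions here, as are the ID and values.

await es.logstash.put_pipeline(
    id="ingest_logs",  # must begin with a letter or underscore
    pipeline={
        "description": "Example pipeline",
        "last_modified": "2025-01-01T00:00:00.000Z",
        "pipeline": "input { stdin {} } output { stdout {} }",
        "pipeline_metadata": {"type": "logstash_pipeline", "version": 1},
        "pipeline_settings": {"pipeline.workers": 1},
        "username": "elastic",
    },
)
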
diff -pruN 8.17.2-2/elasticsearch/_async/client/migration.py 9.2.0-1/elasticsearch/_async/client/migration.py
--- 8.17.2-2/elasticsearch/_async/client/migration.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class MigrationClient(NamespacedClient):
           You are strongly recommended to use the Upgrade Assistant.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations>`_
 
         :param index: Comma-separated list of data streams or indices to check. Wildcard
             (*) expressions are supported.
@@ -94,7 +94,7 @@ class MigrationClient(NamespacedClient):
           You are strongly recommended to use the Upgrade Assistant.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_migration/system_features"
@@ -136,7 +136,7 @@ class MigrationClient(NamespacedClient):
           <p>TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_migration/system_features"
diff -pruN 8.17.2-2/elasticsearch/_async/client/ml.py 9.2.0-1/elasticsearch/_async/client/ml.py
--- 8.17.2-2/elasticsearch/_async/client/ml.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/ml.py	2025-10-28 16:50:53.000000000 +0000
@@ -38,14 +38,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Clear trained model deployment cache.
-          Cache will be cleared on all nodes where the trained model is assigned.
+          <p>Clear trained model deployment cache.</p>
+          <p>Cache will be cleared on all nodes where the trained model is assigned.
           A trained model deployment may have an inference cache enabled.
           As requests are handled by each allocated node, their responses may be cached on that individual node.
           Calling this API clears the caches without restarting the deployment.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-trained-model-deployment-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache>`_
 
         :param model_id: The unique identifier of the trained model.
         """
@@ -93,14 +93,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Close anomaly detection jobs.
-          A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+          <p>Close anomaly detection jobs.</p>
+          <p>A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
           When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.
           If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.
           When a datafeed that has a specified end date stops, it automatically closes its associated job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-close-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, or a wildcard expression. You can close multiple anomaly detection
@@ -161,11 +161,11 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a calendar.
-          Removes all scheduled events from a calendar, then deletes it.</p>
+          <p>Delete a calendar.</p>
+          <p>Remove all scheduled events from a calendar, then delete it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-calendar.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         """
@@ -209,7 +209,7 @@ class MlClient(NamespacedClient):
           <p>Delete events from a calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-calendar-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param event_id: Identifier for the scheduled event. You can obtain this identifier
@@ -260,7 +260,7 @@ class MlClient(NamespacedClient):
           <p>Delete anomaly jobs from a calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-calendar-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param job_id: An identifier for the anomaly detection jobs. It can be a job
@@ -312,7 +312,7 @@ class MlClient(NamespacedClient):
           <p>Delete a data frame analytics job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job.
         :param force: If `true`, it deletes a job that is not stopped; this method is
@@ -363,7 +363,7 @@ class MlClient(NamespacedClient):
           <p>Delete a datafeed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -415,18 +415,18 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete expired ML data.
-          Deletes all job results, model snapshots and forecast data that have exceeded
+          <p>Delete expired ML data.</p>
+          <p>Delete all job results, model snapshots and forecast data that have exceeded
           their retention days period. Machine learning state documents that are not
           associated with any job are also deleted.
           You can limit the request to a single or set of anomaly detection jobs by
           using a job identifier, a group name, a comma-separated list of jobs, or a
           wildcard expression. You can delete expired data for all anomaly detection
-          jobs by using _all, by specifying * as the &lt;job_id&gt;, or by omitting the
-          &lt;job_id&gt;.</p>
+          jobs by using <code>_all</code>, by specifying <code>*</code> as the <code>&lt;job_id&gt;</code>, or by omitting the
+          <code>&lt;job_id&gt;</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-expired-data.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data>`_
 
         :param job_id: Identifier for an anomaly detection job. It can be a job identifier,
             a group name, or a wildcard expression.
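
A short sketch of the call described above, targeting every anomaly detection job with _all; the timeout argument is taken from the API reference rather than this hunk, and the value is illustrative.

await es.ml.delete_expired_data(
    job_id="_all",   # or "*", or omit job_id to target all jobs
    timeout="10m",   # illustrative; assumed from the API reference
)
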
@@ -485,12 +485,12 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a filter.
-          If an anomaly detection job references the filter, you cannot delete the
+          <p>Delete a filter.</p>
+          <p>If an anomaly detection job references the filter, you cannot delete the
           filter. You must update or delete the job before you can delete the filter.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         """
@@ -533,14 +533,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete forecasts from a job.
-          By default, forecasts are retained for 14 days. You can specify a
+          <p>Delete forecasts from a job.</p>
+          <p>By default, forecasts are retained for 14 days. You can specify a
           different retention period with the <code>expires_in</code> parameter in the forecast
           jobs API. The delete forecast API enables you to delete one or more
           forecasts before they expire.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-forecast.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param forecast_id: A comma-separated list of forecast identifiers. If you do
@@ -607,8 +607,8 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete an anomaly detection job.
-          All job configuration, model state and results are deleted.
+          <p>Delete an anomaly detection job.</p>
+          <p>All job configuration, model state and results are deleted.
           It is not currently possible to delete multiple jobs using wildcards or a
           comma separated list. If you delete a job that has a datafeed, the request
           first tries to delete the datafeed. This behavior is equivalent to calling
@@ -616,7 +616,7 @@ class MlClient(NamespacedClient):
           delete job request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param delete_user_annotations: Specifies whether annotations that have been
@@ -670,13 +670,13 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a model snapshot.
-          You cannot delete the active model snapshot. To delete that snapshot, first
+          <p>Delete a model snapshot.</p>
+          <p>You cannot delete the active model snapshot. To delete that snapshot, first
           revert to a different one. To identify the active model snapshot, refer to
           the <code>model_snapshot_id</code> in the results from the get jobs API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: Identifier for the model snapshot.
@@ -719,19 +719,22 @@ class MlClient(NamespacedClient):
         force: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Delete an unreferenced trained model.
-          The request deletes a trained inference model that is not referenced by an ingest pipeline.</p>
+          <p>Delete an unreferenced trained model.</p>
+          <p>The request deletes a trained inference model that is not referenced by an ingest pipeline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-trained-models.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model>`_
 
         :param model_id: The unique identifier of the trained model.
         :param force: Forcefully deletes a trained model that is referenced by ingest
             pipelines or has a started deployment.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         if model_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'model_id'")
@@ -748,6 +751,8 @@ class MlClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -772,13 +777,13 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a trained model alias.
-          This API deletes an existing model alias that refers to a trained model. If
+          <p>Delete a trained model alias.</p>
+          <p>This API deletes an existing model alias that refers to a trained model. If
           the model alias is missing or refers to a model other than the one identified
           by the <code>model_id</code>, this API returns an error.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-trained-models-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias>`_
 
         :param model_id: The trained model ID to which the model alias refers.
         :param model_alias: The model alias to delete.
@@ -833,13 +838,13 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Estimate job model memory usage.
-          Makes an estimation of the memory usage for an anomaly detection job model.
-          It is based on analysis configuration details for the job and cardinality
+          <p>Estimate job model memory usage.</p>
+          <p>Make an estimation of the memory usage for an anomaly detection job model.
+          The estimate is based on analysis configuration details for the job and cardinality
           estimates for the fields it references.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-apis.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory>`_
 
         :param analysis_config: For a list of the properties that you can specify in
             the `analysis_config` component of the body of this API.
@@ -904,14 +909,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Evaluate data frame analytics.
-          The API packages together commonly used evaluation metrics for various types
+          <p>Evaluate data frame analytics.</p>
+          <p>The API packages together commonly used evaluation metrics for various types
           of machine learning features. This has been designed for use on indexes
           created by data frame analytics. Evaluation requires both a ground truth
           field and an analytics result field to be present.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/evaluate-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame>`_
 
         :param evaluation: Defines the type of evaluation you want to perform.
         :param index: Defines the `index` in which the evaluation will be performed.
@@ -985,8 +990,8 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Explain data frame analytics config.
-          This API provides explanations for a data frame analytics config that either
+          <p>Explain data frame analytics config.</p>
+          <p>This API provides explanations for a data frame analytics config that either
           exists already or one that has not been created yet. The following
           explanations are provided:</p>
           <ul>
@@ -996,7 +1001,7 @@ class MlClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/explain-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -1107,7 +1112,7 @@ class MlClient(NamespacedClient):
           analyzing further data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-flush-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param advance_time: Refer to the description for the `advance_time` query parameter.
@@ -1182,7 +1187,7 @@ class MlClient(NamespacedClient):
           based on historical data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-forecast.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast>`_
 
         :param job_id: Identifier for the anomaly detection job. The job must be open
             when you create a forecast; otherwise, an error occurs.
@@ -1268,7 +1273,7 @@ class MlClient(NamespacedClient):
           The API presents a chronological view of the records, grouped by bucket.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-bucket.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param timestamp: The timestamp of a single bucket result. If you do not specify
@@ -1366,7 +1371,7 @@ class MlClient(NamespacedClient):
           <p>Get info about events in calendars.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-calendar-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events>`_
 
         :param calendar_id: A string that uniquely identifies a calendar. You can get
             information for multiple calendars by using a comma-separated list of ids
@@ -1435,7 +1440,7 @@ class MlClient(NamespacedClient):
           <p>Get calendar configuration info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-calendar.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars>`_
 
         :param calendar_id: A string that uniquely identifies a calendar. You can get
             information for multiple calendars by using a comma-separated list of ids
@@ -1511,7 +1516,7 @@ class MlClient(NamespacedClient):
           <p>Get anomaly detection job results for categories.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-category.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param category_id: Identifier for the category, which is unique in the job.
@@ -1599,7 +1604,7 @@ class MlClient(NamespacedClient):
           wildcard expression.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. If you do not specify
             this option, the API returns information for the first hundred data frame
@@ -1671,10 +1676,10 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data frame analytics jobs usage info.</p>
+          <p>Get data frame analytics job stats.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-dfanalytics-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats>`_
 
         :param id: Identifier for the data frame analytics job. If you do not specify
             this option, the API returns information for the first hundred data frame
@@ -1739,7 +1744,7 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get datafeeds usage info.
+          <p>Get datafeed stats.
           You can get statistics for multiple datafeeds in a single API request by
           using a comma-separated list of datafeeds or a wildcard expression. You can
           get statistics for all datafeeds by using <code>_all</code>, by specifying <code>*</code> as the
@@ -1748,7 +1753,7 @@ class MlClient(NamespacedClient):
           This API returns a maximum of 10,000 datafeeds.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-datafeed-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats>`_
 
         :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier
             or a wildcard expression. If you do not specify one of these options, the
@@ -1812,7 +1817,7 @@ class MlClient(NamespacedClient):
           This API returns a maximum of 10,000 datafeeds.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds>`_
 
         :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier
             or a wildcard expression. If you do not specify one of these options, the
@@ -1879,7 +1884,7 @@ class MlClient(NamespacedClient):
           You can get a single filter or all filters.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         :param from_: Skips the specified number of filters.
@@ -1947,7 +1952,7 @@ class MlClient(NamespacedClient):
           <code>influencer_field_name</code> is specified in the job configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-influencer.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param desc: If true, the results are sorted in descending order.
@@ -2028,10 +2033,10 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get anomaly detection jobs usage info.</p>
+          <p>Get anomaly detection job stats.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-job-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, a comma-separated list of jobs, or a wildcard expression. If
@@ -2095,7 +2100,7 @@ class MlClient(NamespacedClient):
           <code>_all</code>, by specifying <code>*</code> as the <code>&lt;job_id&gt;</code>, or by omitting the <code>&lt;job_id&gt;</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, or a wildcard expression. If you do not specify one of these
@@ -2161,7 +2166,7 @@ class MlClient(NamespacedClient):
           on each node, both within the JVM heap, and natively, outside of the JVM.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ml-memory.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats>`_
 
         :param node_id: The names of particular nodes in the cluster to target. For example,
             `nodeId1,nodeId2` or `ml:true`
@@ -2219,7 +2224,7 @@ class MlClient(NamespacedClient):
           <p>Get anomaly detection job model snapshot upgrade usage info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-job-model-snapshot-upgrade-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: A numerical character string that uniquely identifies the
@@ -2293,7 +2298,7 @@ class MlClient(NamespacedClient):
           <p>Get model snapshots info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: A numerical character string that uniquely identifies the
@@ -2385,7 +2390,7 @@ class MlClient(NamespacedClient):
         exclude_interim: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        overall_score: t.Optional[t.Union[float, str]] = None,
+        overall_score: t.Optional[float] = None,
         pretty: t.Optional[bool] = None,
         start: t.Optional[t.Union[str, t.Any]] = None,
         top_n: t.Optional[int] = None,
@@ -2413,7 +2418,7 @@ class MlClient(NamespacedClient):
           jobs' largest bucket span.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-overall-buckets.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, a comma-separated list of jobs or groups, or a wildcard expression.
@@ -2523,7 +2528,7 @@ class MlClient(NamespacedClient):
           number of detectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-record.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param desc: Refer to the description for the `desc` query parameter.
@@ -2611,7 +2616,6 @@ class MlClient(NamespacedClient):
                 ],
             ]
         ] = None,
-        include_model_definition: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         size: t.Optional[int] = None,
         tags: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -2622,7 +2626,7 @@ class MlClient(NamespacedClient):
           <p>Get trained model configuration info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-trained-models.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models>`_
 
         :param model_id: The unique identifier of the trained model or a model alias.
             You can get information for multiple trained models in a single API request
@@ -2641,8 +2645,6 @@ class MlClient(NamespacedClient):
         :param from_: Skips the specified number of models.
         :param include: A comma delimited string of optional fields to include in the
             response body.
-        :param include_model_definition: parameter is deprecated! Use [include=definition]
-            instead
         :param size: Specifies the maximum number of models to obtain.
         :param tags: A comma delimited string of tags. A trained model can have many
             tags, or none. When supplied, only trained models that contain all the supplied
@@ -2672,8 +2674,6 @@ class MlClient(NamespacedClient):
             __query["human"] = human
         if include is not None:
             __query["include"] = include
-        if include_model_definition is not None:
-            __query["include_model_definition"] = include_model_definition
         if pretty is not None:
             __query["pretty"] = pretty
         if size is not None:
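
Since include_model_definition is removed in 9.x, the replacement is the include parameter; a minimal sketch with a placeholder model ID.

resp = await es.ml.get_trained_models(
    model_id="my-model",
    include="definition",  # replaces the removed include_model_definition=True
)
print(resp["count"])
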
@@ -2713,7 +2713,7 @@ class MlClient(NamespacedClient):
           models in a single API request by using a comma-separated list of model IDs or a wildcard expression.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-trained-models-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats>`_
 
         :param model_id: The unique identifier of the trained model or a model alias.
             It can be a comma-separated list or a wildcard expression.
@@ -2779,7 +2779,7 @@ class MlClient(NamespacedClient):
           <p>Evaluate a trained model.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/infer-trained-model.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model>`_
 
         :param model_id: The unique identifier of the trained model.
         :param docs: An array of objects to pass to the model for inference. The objects
@@ -2846,7 +2846,7 @@ class MlClient(NamespacedClient):
           cluster configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ml-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ml/info"
@@ -2886,8 +2886,8 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Open anomaly detection jobs.
-          An anomaly detection job must be opened to be ready to receive and analyze
+          <p>Open anomaly detection jobs.</p>
+          <p>An anomaly detection job must be opened to be ready to receive and analyze
           data. It can be opened and closed multiple times throughout its lifecycle.
           When you open a new job, it starts with an empty model.
           When you open an existing job, the most recent model state is automatically
@@ -2895,7 +2895,7 @@ class MlClient(NamespacedClient):
           new data is received.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-open-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param timeout: Refer to the description for the `timeout` query parameter.
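
As a usage note for the open/close lifecycle described above, a sketch of opening a job and later closing it so the model snapshot is persisted; the job ID and timeout are placeholders.

await es.ml.open_job(job_id="my-job", timeout="30m")
# ... feed data via a datafeed or post_data, then close the job:
await es.ml.close_job(job_id="my-job")
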
@@ -2952,7 +2952,7 @@ class MlClient(NamespacedClient):
           <p>Add scheduled events to the calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-post-calendar-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param events: A list of one or more scheduled events. The event’s start and
@@ -3013,7 +3013,7 @@ class MlClient(NamespacedClient):
           It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-post-data.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data>`_
 
         :param job_id: Identifier for the anomaly detection job. The job must have a
             state of open to receive and process the data.
@@ -3077,10 +3077,10 @@ class MlClient(NamespacedClient):
         .. raw:: html
 
           <p>Preview features used by data frame analytics.
-          Previews the extracted features used by a data frame analytics config.</p>
+          Preview the extracted features used by a data frame analytics config.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/preview-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job.
         :param config: A data frame analytics config as described in create data frame
@@ -3153,7 +3153,7 @@ class MlClient(NamespacedClient):
           You can also use secondary authorization headers to supply the credentials.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-preview-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -3232,7 +3232,7 @@ class MlClient(NamespacedClient):
           <p>Create a calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-calendar.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param description: A description of the calendar.
@@ -3289,7 +3289,7 @@ class MlClient(NamespacedClient):
           <p>Add anomaly detection job to calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-calendar-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param job_id: An identifier for the anomaly detection jobs. It can be a job
@@ -3372,7 +3372,7 @@ class MlClient(NamespacedClient):
           <p>If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -3557,7 +3557,7 @@ class MlClient(NamespacedClient):
           directly to the <code>.ml-config</code> index. Do not give users <code>write</code> privileges on the <code>.ml-config</code> index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -3599,11 +3599,11 @@ class MlClient(NamespacedClient):
         :param ignore_unavailable: If true, unavailable indices (missing or closed) are
             ignored.
         :param indexes: An array of index names. Wildcards are supported. If any of the
-            indices are in remote clusters, the machine learning nodes must have the
-            `remote_cluster_client` role.
+            indices are in remote clusters, the master nodes and the machine learning
+            nodes must have the `remote_cluster_client` role.
         :param indices: An array of index names. Wildcards are supported. If any of the
-            indices are in remote clusters, the machine learning nodes must have the
-            `remote_cluster_client` role.
+            indices are in remote clusters, the master nodes and the machine learning
+            nodes must have the `remote_cluster_client` role.
         :param indices_options: Specifies index expansion options that are used during
             search
         :param job_id: Identifier for the anomaly detection job.
@@ -3719,7 +3719,7 @@ class MlClient(NamespacedClient):
           Specifically, filters are referenced in the <code>custom_rules</code> property of detector configuration objects.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         :param description: A description of the filter.
@@ -3816,12 +3816,12 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create an anomaly detection job.
-          If you include a <code>datafeed_config</code>, you must have read index privileges on the source index.
+          <p>Create an anomaly detection job.</p>
+          <p>If you include a <code>datafeed_config</code>, you must have read index privileges on the source index.
           If you include a <code>datafeed_config</code> but do not provide a query, the datafeed uses <code>{&quot;match_all&quot;: {&quot;boost&quot;: 1}}</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job>`_
 
         :param job_id: The identifier for the anomaly detection job. This identifier
             can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and
@@ -3871,13 +3871,7 @@ class MlClient(NamespacedClient):
         :param description: A description of the job.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
-            expressions match hidden data streams. Supports comma-separated values. Valid
-            values are: * `all`: Match any data stream or index, including hidden ones.
-            * `closed`: Match closed, non-hidden indices. Also matches any non-hidden
-            data stream. Data streams cannot be closed. * `hidden`: Match hidden data
-            streams and hidden indices. Must be combined with `open`, `closed`, or both.
-            * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden
-            indices. Also matches any non-hidden data stream.
+            expressions match hidden data streams. Supports comma-separated values.
         :param groups: A list of job groups. A job can belong to no groups or many.
         :param ignore_throttled: If `true`, concrete, expanded or aliased indices are
             ignored when frozen.
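
A condensed sketch of creating an anomaly detection job with an inline datafeed, matching the note above that a datafeed_config without a query defaults to match_all; the field names and values here are illustrative assumptions, not taken from this diff.

await es.ml.put_job(
    job_id="response-time-job",
    analysis_config={
        "bucket_span": "15m",
        "detectors": [{"function": "mean", "field_name": "responsetime"}],
    },
    data_description={"time_field": "timestamp"},
    datafeed_config={"indices": ["web-logs-*"]},  # no query -> defaults to match_all
)
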
@@ -4029,7 +4023,7 @@ class MlClient(NamespacedClient):
           Enable you to supply a trained model that is not created by data frame analytics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-models.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model>`_
 
         :param model_id: The unique identifier of the trained model.
         :param compressed_definition: The compressed (GZipped and Base64 encoded) inference
@@ -4150,7 +4144,7 @@ class MlClient(NamespacedClient):
           returns a warning.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-models-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias>`_
 
         :param model_id: The identifier for the trained model that the alias refers to.
         :param model_alias: The alias to create or update. This value cannot end in numbers.
@@ -4211,7 +4205,7 @@ class MlClient(NamespacedClient):
           <p>Create part of a trained model definition.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-model-definition-part.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part>`_
 
         :param model_id: The unique identifier of the trained model.
         :param part: The definition part number. When the definition is loaded for inference
@@ -4293,7 +4287,7 @@ class MlClient(NamespacedClient):
           The vocabulary is stored in the index as described in <code>inference_config.*.vocabulary</code> of the trained model definition.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-model-vocabulary.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary>`_
 
         :param model_id: The unique identifier of the trained model.
         :param vocabulary: The model vocabulary, which must not be empty.
@@ -4356,7 +4350,7 @@ class MlClient(NamespacedClient):
           comma separated list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-reset-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job>`_
 
         :param job_id: The ID of the job to reset.
         :param delete_user_annotations: Specifies whether annotations that have been
@@ -4420,7 +4414,7 @@ class MlClient(NamespacedClient):
           snapshot after Black Friday or a critical system failure.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-revert-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: You can specify `empty` as the <snapshot_id>. Reverting to
@@ -4495,7 +4489,7 @@ class MlClient(NamespacedClient):
           machine learning info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-set-upgrade-mode.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode>`_
 
         :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts
             all job and datafeed tasks and prohibits new job and datafeed tasks from
@@ -4555,7 +4549,7 @@ class MlClient(NamespacedClient):
           the destination index in advance with custom settings and mappings.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -4618,7 +4612,7 @@ class MlClient(NamespacedClient):
           authorization headers when you created or updated the datafeed, those credentials are used instead.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-start-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -4664,11 +4658,14 @@ class MlClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
-    @_rewrite_parameters()
+    @_rewrite_parameters(
+        body_fields=("adaptive_allocations",),
+    )
     async def start_trained_model_deployment(
         self,
         *,
         model_id: str,
+        adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None,
         cache_size: t.Optional[t.Union[int, str]] = None,
         deployment_id: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
@@ -4683,6 +4680,7 @@ class MlClient(NamespacedClient):
         wait_for: t.Optional[
             t.Union[str, t.Literal["fully_allocated", "started", "starting"]]
         ] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4691,10 +4689,13 @@ class MlClient(NamespacedClient):
           It allocates the model to every machine learning node.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trained-model-deployment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment>`_
 
         :param model_id: The unique identifier of the trained model. Currently, only
             PyTorch models are supported.
+        :param adaptive_allocations: Adaptive allocations configuration. When enabled,
+            the number of allocations is set based on the current load. If adaptive_allocations
+            is enabled, do not set the number of allocations manually.
         :param cache_size: The inference cache size (in memory outside the JVM heap)
             per node for the model. The default value is the same size as the `model_size_bytes`.
             To disable the cache, `0b` can be provided.
@@ -4704,7 +4705,8 @@ class MlClient(NamespacedClient):
             model in memory but use a separate set of threads to evaluate the model.
             Increasing this value generally increases the throughput. If this setting
             is greater than the number of hardware threads it will automatically be changed
-            to a value less than the number of hardware threads.
+            to a value less than the number of hardware threads. If adaptive_allocations
+            is enabled, do not set this value, because it’s automatically set.
         :param priority: The deployment priority.
         :param queue_capacity: Specifies the number of inference requests that are allowed
             in the queue. After the number of requests exceeds this value, new requests
@@ -4724,6 +4726,7 @@ class MlClient(NamespacedClient):
         __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)}
         __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_start'
         __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
         if cache_size is not None:
             __query["cache_size"] = cache_size
         if deployment_id is not None:
@@ -4748,12 +4751,20 @@ class MlClient(NamespacedClient):
             __query["timeout"] = timeout
         if wait_for is not None:
             __query["wait_for"] = wait_for
+        if not __body:
+            if adaptive_allocations is not None:
+                __body["adaptive_allocations"] = adaptive_allocations
+        if not __body:
+            __body = None  # type: ignore[assignment]
         __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
             params=__query,
             headers=__headers,
+            body=__body,
             endpoint_id="ml.start_trained_model_deployment",
             path_parts=__path_parts,
         )
@@ -4779,7 +4790,7 @@ class MlClient(NamespacedClient):
           throughout its lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/stop-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -4849,7 +4860,7 @@ class MlClient(NamespacedClient):
           multiple times throughout its lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-stop-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed>`_
 
         :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds
             in a single API request by using a comma-separated list of datafeeds or a
@@ -4914,7 +4925,7 @@ class MlClient(NamespacedClient):
           <p>Stop a trained model deployment.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/stop-trained-model-deployment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment>`_
 
         :param model_id: The unique identifier of the trained model.
         :param allow_no_match: Specifies what to do when the request: contains wildcard
@@ -4982,7 +4993,7 @@ class MlClient(NamespacedClient):
           <p>Update a data frame analytics job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -5097,7 +5108,7 @@ class MlClient(NamespacedClient):
           those credentials are used instead.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -5123,13 +5134,7 @@ class MlClient(NamespacedClient):
             check runs only on real-time datafeeds.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
-            expressions match hidden data streams. Supports comma-separated values. Valid
-            values are: * `all`: Match any data stream or index, including hidden ones.
-            * `closed`: Match closed, non-hidden indices. Also matches any non-hidden
-            data stream. Data streams cannot be closed. * `hidden`: Match hidden data
-            streams and hidden indices. Must be combined with `open`, `closed`, or both.
-            * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden
-            indices. Also matches any non-hidden data stream.
+            expressions match hidden data streams. Supports comma-separated values.
         :param frequency: The interval at which scheduled queries are made while the
             datafeed runs in real time. The default value is either the bucket span for
             short bucket spans, or, for longer bucket spans, a sensible fraction of the
@@ -5264,7 +5269,7 @@ class MlClient(NamespacedClient):
           Updates the description of a filter, adds items, or removes items from the list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         :param add_items: The items to add to the filter.
@@ -5358,7 +5363,7 @@ class MlClient(NamespacedClient):
           Updates certain properties of an anomaly detection job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job>`_
 
         :param job_id: Identifier for the job.
         :param allow_lazy_open: Advanced configuration option. Specifies whether this
@@ -5490,7 +5495,7 @@ class MlClient(NamespacedClient):
           Updates certain properties of a snapshot.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: Identifier for the model snapshot.
@@ -5535,12 +5540,13 @@ class MlClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("number_of_allocations",),
+        body_fields=("adaptive_allocations", "number_of_allocations"),
     )
     async def update_trained_model_deployment(
         self,
         *,
         model_id: str,
+        adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -5554,16 +5560,20 @@ class MlClient(NamespacedClient):
           <p>Update a trained model deployment.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-trained-model-deployment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment>`_
 
         :param model_id: The unique identifier of the trained model. Currently, only
             PyTorch models are supported.
+        :param adaptive_allocations: Adaptive allocations configuration. When enabled,
+            the number of allocations is set based on the current load. If adaptive_allocations
+            is enabled, do not set the number of allocations manually.
         :param number_of_allocations: The number of model allocations on each node where
             the model is deployed. All allocations on a node share the same copy of the
             model in memory but use a separate set of threads to evaluate the model.
             Increasing this value generally increases the throughput. If this setting
             is greater than the number of hardware threads it will automatically be changed
-            to a value less than the number of hardware threads.
+            to a value less than the number of hardware threads. If adaptive_allocations
+            is enabled, do not set this value, because it’s automatically set.
         """
         if model_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'model_id'")
@@ -5580,6 +5590,8 @@ class MlClient(NamespacedClient):
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
+            if adaptive_allocations is not None:
+                __body["adaptive_allocations"] = adaptive_allocations
             if number_of_allocations is not None:
                 __body["number_of_allocations"] = number_of_allocations
         if not __body:
@@ -5614,7 +5626,7 @@ class MlClient(NamespacedClient):
         .. raw:: html
 
           <p>Upgrade a snapshot.
-          Upgrades an anomaly detection model snapshot to the latest major version.
+          Upgrade an anomaly detection model snapshot to the latest major version.
           Over time, older snapshot formats are deprecated and removed. Anomaly
           detection jobs support only snapshots that are from the current or previous
           major version.
@@ -5625,7 +5637,7 @@ class MlClient(NamespacedClient):
           job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-upgrade-job-model-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: A numerical character string that uniquely identifies the
@@ -5704,7 +5716,7 @@ class MlClient(NamespacedClient):
           <p>Validate an anomaly detection job.</p>
 
 
-        `<https://www.elastic.co/guide/en/machine-learning/8.17/ml-jobs.html>`_
+        `<https://www.elastic.co/guide/en/machine-learning/9.2/ml-jobs.html>`_
 
         :param analysis_config:
         :param analysis_limits:
@@ -5777,7 +5789,7 @@ class MlClient(NamespacedClient):
           <p>Validate an anomaly detection job.</p>
 
 
-        `<https://www.elastic.co/guide/en/machine-learning/8.17/ml-jobs.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch>`_
 
         :param detector:
         """
diff -pruN 8.17.2-2/elasticsearch/_async/client/monitoring.py 9.2.0-1/elasticsearch/_async/client/monitoring.py
--- 8.17.2-2/elasticsearch/_async/client/monitoring.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/monitoring.py	2025-10-28 16:50:53.000000000 +0000
@@ -48,7 +48,7 @@ class MonitoringClient(NamespacedClient)
           This API is used by the monitoring features to send monitoring data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/monitor-elasticsearch-cluster.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch>`_
 
         :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload
         :param operations:
diff -pruN 8.17.2-2/elasticsearch/_async/client/nodes.py 9.2.0-1/elasticsearch/_async/client/nodes.py
--- 8.17.2-2/elasticsearch/_async/client/nodes.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/nodes.py	2025-10-28 16:50:53.000000000 +0000
@@ -50,7 +50,7 @@ class NodesClient(NamespacedClient):
           Clear the archived repositories metering information in the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
             information.
@@ -105,10 +105,10 @@ class NodesClient(NamespacedClient):
           Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
-            information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
+            information.
         """
         if node_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'node_id'")
@@ -143,7 +143,6 @@ class NodesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_idle_threads: t.Optional[bool] = None,
         interval: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         snapshots: t.Optional[int] = None,
         sort: t.Optional[
@@ -163,15 +162,12 @@ class NodesClient(NamespacedClient):
           The output is plain text with a breakdown of the top hot threads for each node.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-hot-threads.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads>`_
 
         :param node_id: List of node IDs or names used to limit returned information.
         :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket
             select, or to get a task from an empty queue) are filtered out.
         :param interval: The interval to do the second sampling of threads.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
         :param snapshots: Number of samples of thread stacktrace.
         :param sort: The sort order for 'cpu' type (default: total)
         :param threads: Specifies the number of hot threads to provide information for.
@@ -197,8 +193,6 @@ class NodesClient(NamespacedClient):
             __query["ignore_idle_threads"] = ignore_idle_threads
         if interval is not None:
             __query["interval"] = interval
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if snapshots is not None:
@@ -231,27 +225,23 @@ class NodesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         flat_settings: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get node information.
-          By default, the API returns all attributes and core settings for cluster nodes.</p>
+          <p>Get node information.</p>
+          <p>By default, the API returns all attributes and core settings for cluster nodes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
             information.
         :param metric: Limits the information returned to the specific metrics. Supports
             a comma-separated list, such as http,ingest.
         :param flat_settings: If true, returns settings in flat format.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         """
@@ -277,8 +267,6 @@ class NodesClient(NamespacedClient):
             __query["flat_settings"] = flat_settings
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if timeout is not None:
@@ -320,7 +308,7 @@ class NodesClient(NamespacedClient):
           Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/secure-settings.html#reloadable-secure-settings>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings>`_
 
         :param node_id: The names of particular nodes in the cluster to target.
         :param secure_settings_password: The password for the Elasticsearch keystore.
@@ -380,10 +368,7 @@ class NodesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         include_segment_file_sizes: t.Optional[bool] = None,
         include_unloaded_segments: t.Optional[bool] = None,
-        level: t.Optional[
-            t.Union[str, t.Literal["cluster", "indices", "shards"]]
-        ] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        level: t.Optional[t.Union[str, t.Literal["indices", "node", "shards"]]] = None,
         pretty: t.Optional[bool] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         types: t.Optional[t.Sequence[str]] = None,
@@ -396,7 +381,7 @@ class NodesClient(NamespacedClient):
           By default, all stats are returned. You can limit the returned information by using metrics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
             information.
@@ -417,11 +402,8 @@ class NodesClient(NamespacedClient):
             are requested).
         :param include_unloaded_segments: If `true`, the response includes information
             from segments that are not loaded into memory.
-        :param level: Indicates whether statistics are aggregated at the cluster, index,
-            or shard level.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
+        :param level: Indicates whether statistics are aggregated at the node, indices,
+            or shards level.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         :param types: A comma-separated list of document types for the indexing index
@@ -480,8 +462,6 @@ class NodesClient(NamespacedClient):
             __query["include_unloaded_segments"] = include_unloaded_segments
         if level is not None:
             __query["level"] = level
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if timeout is not None:
@@ -516,7 +496,7 @@ class NodesClient(NamespacedClient):
           <p>Get feature usage information.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-usage.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage>`_
 
         :param node_id: A comma-separated list of node IDs or names to limit the returned
             information; use `_local` to return information from the node you're connecting
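
The nodes APIs in 9.x drop the master_timeout query parameter from hot_threads, info, and stats, and nodes.stats now aggregates at the node, indices, or shards level rather than cluster/indices/shards. A minimal sketch under those assumptions, against a local cluster:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # "node" replaces "cluster" as an accepted aggregation level.
        stats = await client.nodes.stats(metric="indices", level="node")
        # master_timeout is no longer accepted here; only timeout remains.
        info = await client.nodes.info(node_id="_local", timeout="30s")
        print(sorted(stats["nodes"]), info["cluster_name"])
        await client.close()

    asyncio.run(main())
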
diff -pruN 8.17.2-2/elasticsearch/_async/client/project.py 9.2.0-1/elasticsearch/_async/client/project.py
--- 8.17.2-2/elasticsearch/_async/client/project.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/project.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,67 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import typing as t
+
+from elastic_transport import ObjectApiResponse
+
+from ._base import NamespacedClient
+from .utils import (
+    Stability,
+    _rewrite_parameters,
+    _stability_warning,
+)
+
+
+class ProjectClient(NamespacedClient):
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def tags(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Return tags defined for the project</p>
+
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_project/tags"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="project.tags",
+            path_parts=__path_parts,
+        )
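
project.py is new in 9.x and exposes a single experimental endpoint, GET /_project/tags. A minimal sketch of calling it, assuming a local cluster and that the experimental-stability warning is acceptable:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Experimental API: decorated with @_stability_warning(Stability.EXPERIMENTAL),
        # so it may emit a warning and may change or be removed without notice.
        tags = await client.project.tags()
        print(tags)
        await client.close()

    asyncio.run(main())
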
diff -pruN 8.17.2-2/elasticsearch/_async/client/query_rules.py 9.2.0-1/elasticsearch/_async/client/query_rules.py
--- 8.17.2-2/elasticsearch/_async/client/query_rules.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/query_rules.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class QueryRulesClient(NamespacedClient)
           This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule>`_
 
         :param ruleset_id: The unique identifier of the query ruleset containing the
             rule to delete
@@ -97,7 +97,7 @@ class QueryRulesClient(NamespacedClient)
           This is a destructive action that is not recoverable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset>`_
 
         :param ruleset_id: The unique identifier of the query ruleset to delete
         """
@@ -142,7 +142,7 @@ class QueryRulesClient(NamespacedClient)
           Get details about a query rule within a query ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule>`_
 
         :param ruleset_id: The unique identifier of the query ruleset containing the
             rule to retrieve
@@ -194,7 +194,7 @@ class QueryRulesClient(NamespacedClient)
           Get details about a query ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset>`_
 
         :param ruleset_id: The unique identifier of the query ruleset
         """
@@ -241,7 +241,7 @@ class QueryRulesClient(NamespacedClient)
           Get summarized information about the query rulesets.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets>`_
 
         :param from_: The offset from the first result to fetch.
         :param size: The maximum number of results to retrieve.
@@ -302,7 +302,7 @@ class QueryRulesClient(NamespacedClient)
           If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule>`_
 
         :param ruleset_id: The unique identifier of the query ruleset containing the
             rule to be created or updated.
@@ -389,7 +389,7 @@ class QueryRulesClient(NamespacedClient)
           If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset>`_
 
         :param ruleset_id: The unique identifier of the query ruleset to be created or
             updated.
@@ -446,7 +446,7 @@ class QueryRulesClient(NamespacedClient)
           Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test>`_
 
         :param ruleset_id: The unique identifier of the query ruleset to be created or
             updated
diff -pruN 8.17.2-2/elasticsearch/_async/client/rollup.py 9.2.0-1/elasticsearch/_async/client/rollup.py
--- 8.17.2-2/elasticsearch/_async/client/rollup.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/rollup.py	2025-10-28 16:50:53.000000000 +0000
@@ -67,7 +67,7 @@ class RollupClient(NamespacedClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job>`_
 
         :param id: Identifier for the job.
         """
@@ -115,7 +115,7 @@ class RollupClient(NamespacedClient):
           For details about a historical rollup job, the rollup capabilities API may be more useful.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs>`_
 
         :param id: Identifier for the rollup job. If it is `_all` or omitted, the API
             returns all rollup jobs.
@@ -171,7 +171,7 @@ class RollupClient(NamespacedClient):
           </ol>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps>`_
 
         :param id: Index, indices or index-pattern to return rollup capabilities for.
             `_all` may be used to fetch rollup capabilities from all jobs.
@@ -225,7 +225,7 @@ class RollupClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps>`_
 
         :param index: Data stream or index to check for rollup capabilities. Wildcard
             (`*`) expressions are supported.
@@ -295,7 +295,7 @@ class RollupClient(NamespacedClient):
           <p>Jobs are created in a <code>STOPPED</code> state. You can start them with the start rollup jobs API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job>`_
 
         :param id: Identifier for the rollup job. This can be any alphanumeric string
             and uniquely identifies the data that is associated with the rollup job.
@@ -419,31 +419,10 @@ class RollupClient(NamespacedClient):
           The following functionality is not available:</p>
           <p><code>size</code>: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
           <code>highlighter</code>, <code>suggestors</code>, <code>post_filter</code>, <code>profile</code>, <code>explain</code>: These are similarly disallowed.</p>
-          <p><strong>Searching both historical rollup and non-rollup data</strong></p>
-          <p>The rollup search API has the capability to search across both &quot;live&quot; non-rollup data and the aggregated rollup data.
-          This is done by simply adding the live indices to the URI. For example:</p>
-          <pre><code>GET sensor-1,sensor_rollup/_rollup_search
-          {
-            &quot;size&quot;: 0,
-            &quot;aggregations&quot;: {
-               &quot;max_temperature&quot;: {
-                &quot;max&quot;: {
-                  &quot;field&quot;: &quot;temperature&quot;
-                }
-              }
-            }
-          }
-          </code></pre>
-          <p>The rollup search endpoint does two things when the search runs:</p>
-          <ul>
-          <li>The original request is sent to the non-rollup index unaltered.</li>
-          <li>A rewritten version of the original request is sent to the rollup index.</li>
-          </ul>
-          <p>When the two responses are received, the endpoint rewrites the rollup response and merges the two together.
-          During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.</p>
+          <p>For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search>`_
 
         :param index: A comma-separated list of data streams and indices used to limit
             the request. This parameter has the following rules: * At least one data
@@ -521,7 +500,7 @@ class RollupClient(NamespacedClient):
           If you try to start a job that is already started, nothing happens.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job>`_
 
         :param id: Identifier for the rollup job.
         """
@@ -575,7 +554,7 @@ class RollupClient(NamespacedClient):
           If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job>`_
 
         :param id: Identifier for the rollup job.
         :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum)
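
The worked example for searching live and rolled-up data together was removed from the rollup_search docstring in favour of a pointer to the external documentation. A minimal sketch of the same request through the client, reusing the sensor-1/sensor_rollup index names from the removed example and assuming those indices exist:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # One request covers both the live index and the rollup index; per the
        # removed docstring, overlapping buckets resolve in favour of live data.
        resp = await client.rollup.rollup_search(
            index="sensor-1,sensor_rollup",
            size=0,  # rollup searches cannot return search hits
            aggregations={"max_temperature": {"max": {"field": "temperature"}}},
        )
        print(resp["aggregations"]["max_temperature"])
        await client.close()

    asyncio.run(main())
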
diff -pruN 8.17.2-2/elasticsearch/_async/client/search_application.py 9.2.0-1/elasticsearch/_async/client/search_application.py
--- 8.17.2-2/elasticsearch/_async/client/search_application.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/search_application.py	2025-10-28 16:50:53.000000000 +0000
@@ -45,13 +45,13 @@ class SearchApplicationClient(Namespaced
         """
         .. raw:: html
 
-          <p>Delete a search application.
-          Remove a search application and its associated alias. Indices attached to the search application are not removed.</p>
+          <p>Delete a search application.</p>
+          <p>Remove a search application and its associated alias. Indices attached to the search application are not removed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-search-application.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete>`_
 
-        :param name: The name of the search application to delete
+        :param name: The name of the search application to delete.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -94,7 +94,7 @@ class SearchApplicationClient(Namespaced
           The associated data stream is also deleted.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-analytics-collection.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics>`_
 
         :param name: The name of the analytics collection to be deleted
         """
@@ -138,7 +138,7 @@ class SearchApplicationClient(Namespaced
           <p>Get search application details.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-search-application.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get>`_
 
         :param name: The name of the search application
         """
@@ -182,7 +182,7 @@ class SearchApplicationClient(Namespaced
           <p>Get behavioral analytics collections.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-analytics-collection.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics>`_
 
         :param name: A list of analytics collections to limit the returned information
         """
@@ -234,7 +234,7 @@ class SearchApplicationClient(Namespaced
           Get information about search applications.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-search-applications.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics>`_
 
         :param from_: Starting offset.
         :param q: Query in the Lucene query string syntax.
@@ -290,7 +290,7 @@ class SearchApplicationClient(Namespaced
           <p>Create a behavioral analytics collection event.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/post-analytics-collection-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event>`_
 
         :param collection_name: The name of the behavioral analytics collection.
         :param event_type: The analytics event type.
@@ -357,7 +357,7 @@ class SearchApplicationClient(Namespaced
           <p>Create or update a search application.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-search-application.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put>`_
 
         :param name: The name of the search application to be created or updated.
         :param search_application:
@@ -414,7 +414,7 @@ class SearchApplicationClient(Namespaced
           <p>Create a behavioral analytics collection.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-analytics-collection.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics>`_
 
         :param name: The name of the analytics collection to be created or updated.
         """
@@ -467,7 +467,7 @@ class SearchApplicationClient(Namespaced
           <p>You must have <code>read</code> privileges on the backing alias of the search application.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-application-render-query.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query>`_
 
         :param name: The name of the search application to render the query for.
         :param params:
@@ -531,7 +531,7 @@ class SearchApplicationClient(Namespaced
           Unspecified template parameters are assigned their default values if applicable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-application-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search>`_
 
         :param name: The name of the search application to be searched.
         :param params: Query parameters specific to this request, which will override
diff -pruN 8.17.2-2/elasticsearch/_async/client/searchable_snapshots.py 9.2.0-1/elasticsearch/_async/client/searchable_snapshots.py
--- 8.17.2-2/elasticsearch/_async/client/searchable_snapshots.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/searchable_snapshots.py	2025-10-28 16:50:53.000000000 +0000
@@ -50,7 +50,7 @@ class SearchableSnapshotsClient(Namespac
           Get statistics about the shared cache for partially mounted indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-cache-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats>`_
 
         :param node_id: The names of the nodes in the cluster to target.
         :param master_timeout:
@@ -111,7 +111,7 @@ class SearchableSnapshotsClient(Namespac
           Clear indices and data streams from the shared cache for partially mounted indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-clear-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             clear from the cache. It supports wildcards (`*`).
@@ -190,7 +190,7 @@ class SearchableSnapshotsClient(Namespac
           Manually mounting ILM-managed snapshots can interfere with ILM processes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-mount-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount>`_
 
         :param repository: The name of the repository containing the snapshot of the
             index to mount.
@@ -278,7 +278,7 @@ class SearchableSnapshotsClient(Namespac
           <p>Get searchable snapshot statistics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats>`_
 
         :param index: A comma-separated list of data streams and indices to retrieve
             statistics for.
diff -pruN 8.17.2-2/elasticsearch/_async/client/security.py 9.2.0-1/elasticsearch/_async/client/security.py
--- 8.17.2-2/elasticsearch/_async/client/security.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/security.py	2025-10-28 16:50:53.000000000 +0000
@@ -58,7 +58,7 @@ class SecurityClient(NamespacedClient):
           Any updates do not change existing content for either the <code>labels</code> or <code>data</code> fields.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-activate-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile>`_
 
         :param grant_type: The type of grant.
         :param access_token: The user's Elasticsearch access token or JWT. Both `access`
@@ -124,7 +124,7 @@ class SecurityClient(NamespacedClient):
           If the user cannot be authenticated, this API returns a 401 status code.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-authenticate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/_authenticate"
@@ -171,7 +171,7 @@ class SecurityClient(NamespacedClient):
           The bulk delete roles API cannot delete roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-delete-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role>`_
 
         :param names: An array of role names to delete
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -232,7 +232,7 @@ class SecurityClient(NamespacedClient):
           The bulk create or update roles API cannot update roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-put-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role>`_
 
         :param roles: A dictionary of role name to RoleDescriptor objects to add or update
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -300,7 +300,7 @@ class SecurityClient(NamespacedClient):
           <p>A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-update-api-keys.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys>`_
 
         :param ids: The API key identifiers.
         :param expiration: Expiration time for the API keys. By default, API keys never
@@ -378,7 +378,7 @@ class SecurityClient(NamespacedClient):
           <p>Change the passwords of users in the native realm and built-in users.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-change-password.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password>`_
 
         :param username: The user whose password you want to change. If you do not specify
             this parameter, the password is changed for the current user.
@@ -445,7 +445,7 @@ class SecurityClient(NamespacedClient):
           The cache is also automatically cleared on state changes of the security index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-api-key-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache>`_
 
         :param ids: Comma-separated list of API key IDs to evict from the API key cache.
             To evict all API keys, use `*`. Does not support other wildcard patterns.
@@ -491,7 +491,7 @@ class SecurityClient(NamespacedClient):
           The cache is also automatically cleared for applications that have their privileges updated.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-privilege-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges>`_
 
         :param application: A comma-separated list of applications. To clear all applications,
             use an asterisk (`*`). It does not support other wildcard patterns.
@@ -541,7 +541,7 @@ class SecurityClient(NamespacedClient):
           For more information, refer to the documentation about controlling the user cache.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms>`_
 
         :param realms: A comma-separated list of realms. To clear all realms, use an
             asterisk (`*`). It does not support other wildcard patterns.
@@ -591,7 +591,7 @@ class SecurityClient(NamespacedClient):
           <p>Evict roles from the native role cache.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-role-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles>`_
 
         :param name: A comma-separated list of roles to evict from the role cache. To
             evict all roles, use an asterisk (`*`). It does not support other wildcard
@@ -643,7 +643,7 @@ class SecurityClient(NamespacedClient):
           The cache for tokens backed by the <code>service_tokens</code> file is cleared automatically on file changes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-service-token-caches.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens>`_
 
         :param namespace: The namespace, which is a top-level grouping of service accounts.
         :param service: The name of the service, which must be unique within its namespace.
@@ -715,7 +715,7 @@ class SecurityClient(NamespacedClient):
           To configure or turn off the API key service, refer to API key service setting documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-create-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key>`_
 
         :param expiration: The expiration time for the API key. By default, API keys
             never expire.
@@ -805,7 +805,7 @@ class SecurityClient(NamespacedClient):
           Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-create-cross-cluster-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key>`_
 
         :param access: The access to be granted to this API key. The access is composed
             of permissions for cross-cluster search and cross-cluster replication. At
@@ -880,7 +880,7 @@ class SecurityClient(NamespacedClient):
           You must actively delete them if they are no longer needed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-create-service-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token>`_
 
         :param namespace: The name of the namespace, which is a top-level grouping of
             service accounts.
@@ -966,7 +966,7 @@ class SecurityClient(NamespacedClient):
           The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delegate-pki-authentication.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki>`_
 
         :param x509_certificate_chain: The X509Certificate chain, which is represented
             as an ordered string array. Each string in the array is a base64-encoded
@@ -1030,7 +1030,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-privilege.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges>`_
 
         :param application: The name of the application. Application privileges are always
             associated with exactly one application.
@@ -1093,7 +1093,7 @@ class SecurityClient(NamespacedClient):
           The delete roles API cannot remove roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role>`_
 
         :param name: The name of the role.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1147,7 +1147,7 @@ class SecurityClient(NamespacedClient):
           The delete role mappings API cannot remove role mappings that are defined in role mapping files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-role-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping>`_
 
         :param name: The distinct name that identifies the role mapping. The name is
             used solely as an identifier to facilitate interaction via the API; it does
@@ -1203,7 +1203,7 @@ class SecurityClient(NamespacedClient):
           <p>Delete service account tokens for a service in a specified namespace.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-service-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token>`_
 
         :param namespace: The namespace, which is a top-level grouping of service accounts.
         :param service: The service name.
@@ -1265,7 +1265,7 @@ class SecurityClient(NamespacedClient):
           <p>Delete users from the native realm.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user>`_
 
         :param username: An identifier for the user.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1319,7 +1319,7 @@ class SecurityClient(NamespacedClient):
           You can use this API to revoke a user's access to Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-disable-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user>`_
 
         :param username: An identifier for the user.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1376,7 +1376,7 @@ class SecurityClient(NamespacedClient):
           To re-enable a disabled user profile, use the enable user profile API .</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-disable-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile>`_
 
         :param uid: Unique identifier for the user profile.
         :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
@@ -1429,7 +1429,7 @@ class SecurityClient(NamespacedClient):
           By default, when you create users, they are enabled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-enable-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user>`_
 
         :param username: An identifier for the user.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1486,7 +1486,7 @@ class SecurityClient(NamespacedClient):
           If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-enable-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile>`_
 
         :param uid: A unique identifier for the user profile.
         :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
@@ -1536,7 +1536,7 @@ class SecurityClient(NamespacedClient):
           Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-kibana-enrollment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/enroll/kibana"
@@ -1577,7 +1577,7 @@ class SecurityClient(NamespacedClient):
           The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-node-enrollment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/enroll/node"
@@ -1626,7 +1626,7 @@ class SecurityClient(NamespacedClient):
           If you have <code>read_security</code>, <code>manage_api_key</code> or greater privileges (including <code>manage_security</code>), this API returns all API keys regardless of ownership.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key>`_
 
         :param active_only: A boolean flag that can be used to query API keys that are
             currently active. An API key is considered active if it is neither invalidated,
@@ -1704,7 +1704,7 @@ class SecurityClient(NamespacedClient):
           <p>Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-builtin-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/privilege/_builtin"
@@ -1749,7 +1749,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges>`_
 
         :param application: The name of the application. Application privileges are always
             associated with exactly one application. If you do not specify this parameter,
@@ -1805,7 +1805,7 @@ class SecurityClient(NamespacedClient):
           The get roles API cannot retrieve roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role>`_
 
         :param name: The name of the role. You can specify multiple roles as a comma-separated
             list. If you do not specify this parameter, the API returns information about
@@ -1856,7 +1856,7 @@ class SecurityClient(NamespacedClient):
           The get role mappings API cannot retrieve role mappings that are defined in role mapping files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-role-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping>`_
 
         :param name: The distinct name that identifies the role mapping. The name is
             used solely as an identifier to facilitate interaction via the API; it does
@@ -1909,7 +1909,7 @@ class SecurityClient(NamespacedClient):
           <p>NOTE: Currently, only the <code>elastic/fleet-server</code> service account is available.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-service-accounts.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts>`_
 
         :param namespace: The name of the namespace. Omit this parameter to retrieve
             information about all service accounts. If you omit this parameter, you must
@@ -1967,7 +1967,7 @@ class SecurityClient(NamespacedClient):
           Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-service-credentials.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials>`_
 
         :param namespace: The name of the namespace.
         :param service: The service name.
@@ -2023,7 +2023,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings>`_
 
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -2052,6 +2052,45 @@ class SecurityClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters()
+    async def get_stats(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get security stats.</p>
+          <p>Gather security usage statistics from all node(s) within the cluster.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats>`_
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_security/stats"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="security.get_stats",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         body_fields=(
             "grant_type",
@@ -2099,7 +2138,7 @@ class SecurityClient(NamespacedClient):
           If you want to invalidate a token immediately, you can do so by using the invalidate token API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token>`_
 
         :param grant_type: The type of grant. Supported grant types are: `password`,
             `_kerberos`, `client_credentials`, and `refresh_token`.
@@ -2173,7 +2212,7 @@ class SecurityClient(NamespacedClient):
           <p>Get information about users in the native realm and built-in users.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user>`_
 
         :param username: An identifier for the user. You can specify multiple usernames
             as a comma-separated list. If you omit this parameter, the API retrieves
@@ -2213,13 +2252,10 @@ class SecurityClient(NamespacedClient):
     async def get_user_privileges(
         self,
         *,
-        application: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        priviledge: t.Optional[str] = None,
-        username: t.Optional[t.Union[None, str]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -2231,20 +2267,11 @@ class SecurityClient(NamespacedClient):
           To check whether a user has a specific list of privileges, use the has privileges API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-user-privileges.html>`_
-
-        :param application: The name of the application. Application privileges are always
-            associated with exactly one application. If you do not specify this parameter,
-            the API returns information about all privileges for all applications.
-        :param priviledge: The name of the privilege. If you do not specify this parameter,
-            the API returns information about all privileges for the requested application.
-        :param username:
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/user/_privileges"
         __query: t.Dict[str, t.Any] = {}
-        if application is not None:
-            __query["application"] = application
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2253,10 +2280,6 @@ class SecurityClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
-        if priviledge is not None:
-            __query["priviledge"] = priviledge
-        if username is not None:
-            __query["username"] = username
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -2288,7 +2311,7 @@ class SecurityClient(NamespacedClient):
           Elastic reserves the right to change or remove this feature in future releases without prior notice.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile>`_
 
         :param uid: A unique identifier for the user profile.
         :param data: A comma-separated list of filters for the `data` field of the profile
@@ -2345,6 +2368,9 @@ class SecurityClient(NamespacedClient):
         human: t.Optional[bool] = None,
         password: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[
+            t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
+        ] = None,
         run_as: t.Optional[str] = None,
         username: t.Optional[str] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
@@ -2372,7 +2398,7 @@ class SecurityClient(NamespacedClient):
           <p>By default, API keys never expire. You can specify expiration information when you create the API keys.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-grant-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key>`_
 
         :param api_key: The API key.
         :param grant_type: The type of grant. Supported grant types are: `access_token`,
@@ -2382,6 +2408,9 @@ class SecurityClient(NamespacedClient):
             types.
         :param password: The user's password. If you specify the `password` grant type,
             this parameter is required. It is not valid with other grant types.
+        :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
+            this operation visible to search. If 'wait_for', it waits for a refresh to
+            make this operation visible to search. If 'false', nothing is done with refreshes.
         :param run_as: The name of the user to be impersonated.
         :param username: The user name that identifies the user. If you specify the `password`
             grant type, this parameter is required. It is not valid with other grant
@@ -2403,6 +2432,8 @@ class SecurityClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         if not __body:
             if api_key is not None:
                 __body["api_key"] = api_key
@@ -2455,6 +2486,7 @@ class SecurityClient(NamespacedClient):
                         "manage_data_frame_transforms",
                         "manage_data_stream_global_retention",
                         "manage_enrich",
+                        "manage_esql",
                         "manage_ilm",
                         "manage_index_templates",
                         "manage_inference",
@@ -2480,6 +2512,7 @@ class SecurityClient(NamespacedClient):
                         "monitor_data_frame_transforms",
                         "monitor_data_stream_global_retention",
                         "monitor_enrich",
+                        "monitor_esql",
                         "monitor_inference",
                         "monitor_ml",
                         "monitor_rollup",
@@ -2519,7 +2552,7 @@ class SecurityClient(NamespacedClient):
           To check the privileges of other users, you must use the run as feature.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-has-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges>`_
 
         :param user: Username
         :param application:
@@ -2584,7 +2617,7 @@ class SecurityClient(NamespacedClient):
           Elastic reserves the right to change or remove this feature in future releases without prior notice.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-has-privileges-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile>`_
 
         :param privileges: An object containing all the privileges to be checked.
         :param uids: A list of profile IDs. The privileges are checked for associated
@@ -2658,7 +2691,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-invalidate-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key>`_
 
         :param id:
         :param ids: A list of API key ids. This parameter cannot be used with any of
@@ -2742,7 +2775,7 @@ class SecurityClient(NamespacedClient):
           If none of these two are specified, then <code>realm_name</code> and/or <code>username</code> need to be specified.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-invalidate-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token>`_
 
         :param realm_name: The name of an authentication realm. This parameter cannot
             be used with either `refresh_token` or `token`.
@@ -2810,7 +2843,7 @@ class SecurityClient(NamespacedClient):
           These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-authenticate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate>`_
 
         :param nonce: Associate a client session with an ID token and mitigate replay
             attacks. This value needs to be the same as the one that was provided to
@@ -2867,12 +2900,12 @@ class SecurityClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("access_token", "refresh_token"),
+        body_fields=("token", "refresh_token"),
     )
     async def oidc_logout(
         self,
         *,
-        access_token: t.Optional[str] = None,
+        token: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -2890,13 +2923,13 @@ class SecurityClient(NamespacedClient):
           These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-logout.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout>`_
 
-        :param access_token: The access token to be invalidated.
+        :param token: The access token to be invalidated.
         :param refresh_token: The refresh token to be invalidated.
         """
-        if access_token is None and body is None:
-            raise ValueError("Empty value passed for parameter 'access_token'")
+        if token is None and body is None:
+            raise ValueError("Empty value passed for parameter 'token'")
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/oidc/logout"
         __query: t.Dict[str, t.Any] = {}
@@ -2910,8 +2943,8 @@ class SecurityClient(NamespacedClient):
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
-            if access_token is not None:
-                __body["access_token"] = access_token
+            if token is not None:
+                __body["token"] = token
             if refresh_token is not None:
                 __body["refresh_token"] = refresh_token
         __headers = {"accept": "application/json", "content-type": "application/json"}
@@ -2952,7 +2985,7 @@ class SecurityClient(NamespacedClient):
           These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-prepare-authentication.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication>`_
 
         :param iss: In the case of a third party initiated single sign on, this is the
             issuer identifier for the OP that the RP is to send the authentication request
@@ -3048,7 +3081,7 @@ class SecurityClient(NamespacedClient):
           <p>Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: <code>/</code>, <code>*</code>, <code>:</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges>`_
 
         :param privileges:
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -3126,6 +3159,7 @@ class SecurityClient(NamespacedClient):
                         "manage_data_frame_transforms",
                         "manage_data_stream_global_retention",
                         "manage_enrich",
+                        "manage_esql",
                         "manage_ilm",
                         "manage_index_templates",
                         "manage_inference",
@@ -3151,6 +3185,7 @@ class SecurityClient(NamespacedClient):
                         "monitor_data_frame_transforms",
                         "monitor_data_stream_global_retention",
                         "monitor_enrich",
+                        "monitor_esql",
                         "monitor_inference",
                         "monitor_ml",
                         "monitor_rollup",
@@ -3200,9 +3235,12 @@ class SecurityClient(NamespacedClient):
           File-based role management is not available in Elastic Serverless.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role>`_
 
-        :param name: The name of the role.
+        :param name: The name of the role that is being created or updated. On Elasticsearch
+            Serverless, the role name must begin with a letter or digit and can only
+            contain letters, digits and the characters '_', '-', and '.'. Each role must
+            have a unique name, as this will serve as the identifier for that role.
         :param applications: A list of application privilege entries.
         :param cluster: A list of cluster privileges. These privileges define the cluster-level
             actions for users with this role.
@@ -3332,7 +3370,7 @@ class SecurityClient(NamespacedClient):
           If the format of the template is set to &quot;json&quot; then the template is expected to produce a JSON string or an array of JSON strings for the role names.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-role-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping>`_
 
         :param name: The distinct name that identifies the role mapping. The name is
             used solely as an identifier to facilitate interaction via the API; it does
@@ -3434,7 +3472,7 @@ class SecurityClient(NamespacedClient):
           To change a user's password without updating any other fields, use the change password API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user>`_
 
         :param username: An identifier for the user. NOTE: Usernames must be at least
             1 and no more than 507 characters. They can contain alphanumeric characters
@@ -3528,7 +3566,7 @@ class SecurityClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -3550,10 +3588,11 @@ class SecurityClient(NamespacedClient):
           You can optionally filter the results with a query.</p>
           <p>To use this API, you must have at least the <code>manage_own_api_key</code> or the <code>read_security</code> cluster privileges.
           If you have only the <code>manage_own_api_key</code> privilege, this API returns only the API keys that you own.
-          If you have the <code>read_security</code>, <code>manage_api_key</code>, or greater privileges (including <code>manage_security</code>), this API returns all API keys regardless of ownership.</p>
+          If you have the <code>read_security</code>, <code>manage_api_key</code>, or greater privileges (including <code>manage_security</code>), this API returns all API keys regardless of ownership.
+          Refer to the linked documentation for examples of how to find API keys:</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-query-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys>`_
 
         :param aggregations: Any aggregations to run over the corpus of returned API
             keys. Aggregations and queries work together. Aggregations are computed only
@@ -3674,7 +3713,7 @@ class SecurityClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -3696,7 +3735,7 @@ class SecurityClient(NamespacedClient):
           Also, the results can be paginated and sorted.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-query-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role>`_
 
         :param from_: The starting document offset. It must not be negative. By default,
             you cannot page through more than 10,000 hits using the `from` and `size`
@@ -3767,7 +3806,7 @@ class SecurityClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -3789,7 +3828,7 @@ class SecurityClient(NamespacedClient):
           This API is only for native users.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-query-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user>`_
 
         :param from_: The starting document offset. It must not be negative. By default,
             you cannot page through more than 10,000 hits using the `from` and `size`
@@ -3882,7 +3921,7 @@ class SecurityClient(NamespacedClient):
           This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-authenticate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate>`_
 
         :param content: The SAML response as it was sent by the user's browser, usually
             a Base64 encoded XML document.
@@ -3955,7 +3994,7 @@ class SecurityClient(NamespacedClient):
           The caller of this API must prepare the request accordingly so that this API can handle either of them.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-complete-logout.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout>`_
 
         :param ids: A JSON array with all the valid SAML Request Ids that the caller
             of the API has for the current user.
@@ -4031,7 +4070,7 @@ class SecurityClient(NamespacedClient):
           Thus the user can be redirected back to their IdP.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-invalidate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate>`_
 
         :param query_string: The query part of the URL that the user was redirected to
             by the SAML IdP to initiate the Single Logout. This query should include
@@ -4106,7 +4145,7 @@ class SecurityClient(NamespacedClient):
           If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-logout.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout>`_
 
         :param token: The access token that was returned as a response to calling the
             SAML authenticate API. Alternatively, the most recent token that was received
@@ -4176,7 +4215,7 @@ class SecurityClient(NamespacedClient):
           The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-prepare-authentication.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication>`_
 
         :param acs: The Assertion Consumer Service URL that matches the one of the SAML
             realms in Elasticsearch. The realm is used to generate the authentication
@@ -4237,7 +4276,7 @@ class SecurityClient(NamespacedClient):
           This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-sp-metadata.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata>`_
 
         :param realm_name: The name of the SAML realm in Elasticsearch.
         """
@@ -4290,7 +4329,7 @@ class SecurityClient(NamespacedClient):
           Elastic reserves the right to change or remove this feature in future releases without prior notice.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-suggest-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles>`_
 
         :param data: A comma-separated list of filters for the `data` field of the profile
             document. To return all content use `data=*`. To return a subset of content,
@@ -4377,7 +4416,7 @@ class SecurityClient(NamespacedClient):
           This change can occur if the owner user's permissions have changed since the API key was created or last modified.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key>`_
 
         :param id: The ID of the API key to update.
         :param expiration: The expiration time for the API key. By default, API keys
@@ -4463,9 +4502,10 @@ class SecurityClient(NamespacedClient):
           <p>This API supports updates to an API key's access scope, metadata, and expiration.
           The owner user's information, such as the <code>username</code> and <code>realm</code>, is also updated automatically on every call.</p>
           <p>NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.</p>
+          <p>To learn more about how to use this API, refer to the <a href="https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples">Update cross cluster API key API examples page</a>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-cross-cluster-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key>`_
 
         :param id: The ID of the cross-cluster API key to update.
         :param access: The access to be granted to this API key. The access is composed
@@ -4544,7 +4584,7 @@ class SecurityClient(NamespacedClient):
           This API does not yet support configuring the settings for indices before they are in use.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -4629,7 +4669,7 @@ class SecurityClient(NamespacedClient):
           The <code>update_profile_data</code> global privilege grants privileges for updating only the allowed namespaces.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-user-profile-data.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data>`_
 
         :param uid: A unique identifier for the user profile.
         :param data: Non-searchable data that you want to associate with the user profile.
diff -pruN 8.17.2-2/elasticsearch/_async/client/shutdown.py 9.2.0-1/elasticsearch/_async/client/shutdown.py
--- 8.17.2-2/elasticsearch/_async/client/shutdown.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/shutdown.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,13 +33,9 @@ class ShutdownClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -53,7 +49,7 @@ class ShutdownClient(NamespacedClient):
           <p>If the operator privileges feature is enabled, you must be an operator to use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node>`_
 
         :param node_id: The node id of node to be removed from the shutdown state
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -97,13 +93,8 @@ class ShutdownClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -115,14 +106,12 @@ class ShutdownClient(NamespacedClient):
           <p>If the operator privileges feature is enabled, you must be an operator to use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node>`_
 
         :param node_id: Which node for which to retrieve the shutdown status
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str]
         if node_id not in SKIP_IN_PATH:
@@ -142,8 +131,6 @@ class ShutdownClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -169,14 +156,10 @@ class ShutdownClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         target_node_name: t.Optional[str] = None,
-        timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -194,7 +177,7 @@ class ShutdownClient(NamespacedClient):
           Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node>`_
 
         :param node_id: The node identifier. This parameter is not validated against
             the cluster's active nodes. This enables you to register a node for shut
diff -pruN 8.17.2-2/elasticsearch/_async/client/simulate.py 9.2.0-1/elasticsearch/_async/client/simulate.py
--- 8.17.2-2/elasticsearch/_async/client/simulate.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/simulate.py	2025-10-28 16:50:53.000000000 +0000
@@ -35,7 +35,7 @@ class SimulateClient(NamespacedClient):
         body_fields=(
             "docs",
             "component_template_substitutions",
-            "index_template_subtitutions",
+            "index_template_substitutions",
             "mapping_addition",
             "pipeline_substitutions",
         ),
@@ -52,10 +52,11 @@ class SimulateClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        index_template_subtitutions: t.Optional[
+        index_template_substitutions: t.Optional[
             t.Mapping[str, t.Mapping[str, t.Any]]
         ] = None,
         mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None,
+        merge_type: t.Optional[t.Union[str, t.Literal["index", "template"]]] = None,
         pipeline: t.Optional[str] = None,
         pipeline_substitutions: t.Optional[
             t.Mapping[str, t.Mapping[str, t.Any]]
@@ -81,7 +82,7 @@ class SimulateClient(NamespacedClient):
           These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/simulate-ingest-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest>`_
 
         :param docs: Sample documents to test in the pipeline.
         :param index: The index to simulate ingesting into. This value can be overridden
@@ -90,9 +91,14 @@ class SimulateClient(NamespacedClient):
             an index argument.
         :param component_template_substitutions: A map of component template names to
             substitute component template definition objects.
-        :param index_template_subtitutions: A map of index template names to substitute
+        :param index_template_substitutions: A map of index template names to substitute
             index template definition objects.
         :param mapping_addition:
+        :param merge_type: The mapping merge type if mapping overrides are being provided
+            in mapping_addition. The allowed values are one of index or template. The
+            index option merges mappings the way they would be merged into an existing
+            index. The template option merges mappings the way they would be merged into
+            a template.
         :param pipeline: The pipeline to use as the default pipeline. This value can
             be used to override the default pipeline of the index.
         :param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline`
@@ -116,6 +122,8 @@ class SimulateClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if merge_type is not None:
+            __query["merge_type"] = merge_type
         if pipeline is not None:
             __query["pipeline"] = pipeline
         if pretty is not None:
@@ -127,8 +135,8 @@ class SimulateClient(NamespacedClient):
                 __body["component_template_substitutions"] = (
                     component_template_substitutions
                 )
-            if index_template_subtitutions is not None:
-                __body["index_template_subtitutions"] = index_template_subtitutions
+            if index_template_substitutions is not None:
+                __body["index_template_substitutions"] = index_template_substitutions
             if mapping_addition is not None:
                 __body["mapping_addition"] = mapping_addition
             if pipeline_substitutions is not None:
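A sketch of calling simulate.ingest() with the corrected index_template_substitutions spelling and the new merge_type query parameter shown above. The index name, sample document, and mapping override are hypothetical.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="<api-key>")
    try:
        response = await client.simulate.ingest(
            index="my-index",  # hypothetical target index
            docs=[{"_source": {"message": "hello world"}}],
            mapping_addition={"properties": {"message": {"type": "keyword"}}},
            merge_type="index",  # merge the mapping override as it would merge into an index
        )
        print(response["docs"])
    finally:
        await client.close()


asyncio.run(main())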
diff -pruN 8.17.2-2/elasticsearch/_async/client/slm.py 9.2.0-1/elasticsearch/_async/client/slm.py
--- 8.17.2-2/elasticsearch/_async/client/slm.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/slm.py	2025-10-28 16:50:53.000000000 +0000
@@ -45,7 +45,7 @@ class SlmClient(NamespacedClient):
           This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-delete-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle>`_
 
         :param policy_id: The id of the snapshot lifecycle policy to remove
         :param master_timeout: The period to wait for a connection to the master node.
@@ -101,7 +101,7 @@ class SlmClient(NamespacedClient):
           The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-execute-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle>`_
 
         :param policy_id: The id of the snapshot lifecycle policy to be executed
         :param master_timeout: The period to wait for a connection to the master node.
@@ -156,7 +156,7 @@ class SlmClient(NamespacedClient):
           The retention policy is normally applied according to its schedule.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-execute-retention.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -208,7 +208,7 @@ class SlmClient(NamespacedClient):
           Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle>`_
 
         :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve
         :param master_timeout: The period to wait for a connection to the master node.
@@ -265,7 +265,7 @@ class SlmClient(NamespacedClient):
           Get global and policy-level statistics about actions taken by snapshot lifecycle management.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats>`_
 
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -315,7 +315,7 @@ class SlmClient(NamespacedClient):
           <p>Get the snapshot lifecycle management status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -379,7 +379,7 @@ class SlmClient(NamespacedClient):
           Only the latest version of a policy is stored.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-put-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle>`_
 
         :param policy_id: The identifier for the snapshot lifecycle policy you want to
             create or update.
@@ -431,11 +431,7 @@ class SlmClient(NamespacedClient):
                 __body["retention"] = retention
             if schedule is not None:
                 __body["schedule"] = schedule
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -465,7 +461,7 @@ class SlmClient(NamespacedClient):
           Manually starting SLM is necessary only if it has been stopped using the stop SLM API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-start.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -523,7 +519,7 @@ class SlmClient(NamespacedClient):
           Use the get snapshot lifecycle management status API to see if SLM is running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-stop.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
diff -pruN 8.17.2-2/elasticsearch/_async/client/snapshot.py 9.2.0-1/elasticsearch/_async/client/snapshot.py
--- 8.17.2-2/elasticsearch/_async/client/snapshot.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/snapshot.py	2025-10-28 16:50:53.000000000 +0000
@@ -50,11 +50,18 @@ class SnapshotClient(NamespacedClient):
           Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository>`_
 
-        :param name: Snapshot repository to clean up.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param timeout: Period to wait for a response.
+        :param name: The name of the snapshot repository to clean up.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. To indicate that the request should never timeout,
+            set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -98,7 +105,6 @@ class SnapshotClient(NamespacedClient):
         human: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -108,14 +114,17 @@ class SnapshotClient(NamespacedClient):
           Clone part or all of a snapshot into another snapshot in the same repository.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone>`_
 
-        :param repository: A repository name
-        :param snapshot: The name of the snapshot to clone from
-        :param target_snapshot: The name of the cloned snapshot to create
-        :param indices:
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout:
+        :param repository: The name of the snapshot repository that both source and target
+            snapshot belong to.
+        :param snapshot: The source snapshot name.
+        :param target_snapshot: The target snapshot name.
+        :param indices: A comma-separated list of indices to include in the snapshot.
+            Multi-target syntax is supported.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -143,8 +152,6 @@ class SnapshotClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         if not __body:
             if indices is not None:
                 __body["indices"] = indices
@@ -161,6 +168,7 @@ class SnapshotClient(NamespacedClient):
 
     @_rewrite_parameters(
         body_fields=(
+            "expand_wildcards",
             "feature_states",
             "ignore_unavailable",
             "include_global_state",
@@ -175,6 +183,14 @@ class SnapshotClient(NamespacedClient):
         repository: str,
         snapshot: str,
         error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
         feature_states: t.Optional[t.Sequence[str]] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -195,15 +211,22 @@ class SnapshotClient(NamespacedClient):
           Take a snapshot of a cluster or of data streams and indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create>`_
 
-        :param repository: Repository for the snapshot.
-        :param snapshot: Name of the snapshot. Must be unique in the repository.
-        :param feature_states: Feature states to include in the snapshot. Each feature
+        :param repository: The name of the repository for the snapshot.
+        :param snapshot: The name of the snapshot. It supports date math. It must be
+            unique in the repository.
+        :param expand_wildcards: Determines how wildcard patterns in the `indices` parameter
+            match data streams and indices. It supports comma-separated values such as
+            `open,hidden`.
+        :param feature_states: The feature states to include in the snapshot. Each feature
             state includes one or more system indices containing related data. You can
             view a list of eligible features using the get features API. If `include_global_state`
             is `true`, all current feature states are included by default. If `include_global_state`
-            is `false`, no feature states are included by default.
+            is `false`, no feature states are included by default. Note that specifying
+            an empty array will result in the default behavior. To exclude all feature
+            states, regardless of the `include_global_state` value, specify an array
+            with only the value `none` (`["none"]`).
         :param ignore_unavailable: If `true`, the request ignores data streams and indices
             in `indices` that are missing or closed. If `false`, the request returns
             an error for any data stream or index that is missing or closed.
@@ -212,18 +235,24 @@ class SnapshotClient(NamespacedClient):
             composable index templates, legacy index templates, ingest pipelines, and
             ILM policies. It also includes data stored in system indices, such as Watches
             and task records (configurable via `feature_states`).
-        :param indices: Data streams and indices to include in the snapshot. Supports
-            multi-target syntax. Includes all data streams and indices by default.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param metadata: Optional metadata for the snapshot. May have any contents. Must
-            be less than 1024 bytes. This map is not automatically generated by Elasticsearch.
-        :param partial: If `true`, allows restoring a partial snapshot of indices with
-            unavailable shards. Only shards that were successfully included in the snapshot
-            will be restored. All missing shards will be recreated as empty. If `false`,
-            the entire restore operation will fail if one or more indices included in
-            the snapshot do not have all primary shards available.
+        :param indices: A comma-separated list of data streams and indices to include
+            in the snapshot. It supports a multi-target syntax. The default is an empty
+            array (`[]`), which includes all regular data streams and regular indices.
+            To exclude all data streams and indices, use `-*`. You can't use this parameter
+            to include or exclude system indices or system data streams from a snapshot.
+            Use `feature_states` instead.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param metadata: Arbitrary metadata for the snapshot, such as a record of who
+            took the snapshot, why it was taken, or any other useful data. It can have
+            any contents but it must be less than 1024 bytes. This information is not
+            automatically generated by Elasticsearch.
+        :param partial: If `true`, it enables you to restore a partial snapshot of indices
+            with unavailable shards. Only shards that were successfully included in the
+            snapshot will be restored. All missing shards will be recreated as empty.
+            If `false`, the entire restore operation will fail if one or more indices
+            included in the snapshot do not have all primary shards available.
         :param wait_for_completion: If `true`, the request returns a response when the
             snapshot is complete. If `false`, the request returns a response when the
             snapshot initializes.
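A sketch of snapshot.create() using the new expand_wildcards body field and the ["none"] sentinel for feature_states described above. The repository name and index pattern are placeholders.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="<api-key>")
    try:
        response = await client.snapshot.create(
            repository="my-repository",
            snapshot="nightly-logs-1",       # snapshot names also support date math
            indices="logs-*",
            expand_wildcards="open,hidden",  # control how the wildcard above is expanded
            feature_states=["none"],         # exclude all feature states from the snapshot
            wait_for_completion=True,        # block until the snapshot finishes
        )
        print(response["snapshot"]["state"])
    finally:
        await client.close()


asyncio.run(main())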
@@ -252,6 +281,8 @@ class SnapshotClient(NamespacedClient):
         if wait_for_completion is not None:
             __query["wait_for_completion"] = wait_for_completion
         if not __body:
+            if expand_wildcards is not None:
+                __body["expand_wildcards"] = expand_wildcards
             if feature_states is not None:
                 __body["feature_states"] = feature_states
             if ignore_unavailable is not None:
@@ -303,15 +334,26 @@ class SnapshotClient(NamespacedClient):
           IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
           To register a snapshot repository, the cluster's global metadata must be writeable.
           Ensure there are no cluster blocks (for example, <code>cluster.blocks.read_only</code> and <code>cluster.blocks.read_only_allow_delete</code> settings) that prevent write access.</p>
+          <p>Several options for this API can be specified using a query parameter or a request body parameter.
+          If both parameters are specified, only the query parameter is used.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository>`_
 
-        :param name: A repository name
+        :param name: The name of the snapshot repository to register or update.
         :param repository:
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
-        :param verify: Whether to verify the repository after creation
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
+        :param verify: If `true`, the request verifies the repository is functional on
+            all master and data nodes in the cluster. If `false`, this verification is
+            skipped. You can also perform this verification with the verify snapshot
+            repository API.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -361,6 +403,7 @@ class SnapshotClient(NamespacedClient):
         human: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        wait_for_completion: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -368,11 +411,17 @@ class SnapshotClient(NamespacedClient):
           <p>Delete snapshots.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete>`_
 
-        :param repository: A repository name
-        :param snapshot: A comma-separated list of snapshot names
-        :param master_timeout: Explicit operation timeout for connection to master node
+        :param repository: The name of the repository to delete a snapshot from.
+        :param snapshot: A comma-separated list of snapshot names to delete. It also
+            accepts wildcards (`*`).
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param wait_for_completion: If `true`, the request returns a response when the
+            matching snapshots are all deleted. If `false`, the request returns a response
+            as soon as the deletes are scheduled.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -394,6 +443,8 @@ class SnapshotClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if wait_for_completion is not None:
+            __query["wait_for_completion"] = wait_for_completion
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -424,12 +475,18 @@ class SnapshotClient(NamespacedClient):
           The snapshots themselves are left untouched and in place.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository>`_
 
-        :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns
-            are supported.
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param name: The name of the snapshot repositories to unregister. Wildcard (`*`)
+            patterns are supported.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -493,56 +550,94 @@ class SnapshotClient(NamespacedClient):
                 ],
             ]
         ] = None,
+        state: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "FAILED",
+                            "INCOMPATIBLE",
+                            "IN_PROGRESS",
+                            "PARTIAL",
+                            "SUCCESS",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "FAILED", "INCOMPATIBLE", "IN_PROGRESS", "PARTIAL", "SUCCESS"
+                    ],
+                ],
+            ]
+        ] = None,
         verbose: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
           <p>Get snapshot information.</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html>`_
-
-        :param repository: Comma-separated list of snapshot repository names used to
-            limit the request. Wildcard (*) expressions are supported.
-        :param snapshot: Comma-separated list of snapshot names to retrieve. Also accepts
-            wildcards (*). - To get information about all snapshots in a registered repository,
-            use a wildcard (*) or _all. - To get information about any snapshots that
-            are currently running, use _current.
-        :param after: Offset identifier to start pagination from as returned by the next
-            field in the response body.
-        :param from_sort_value: Value of the current sort column at which to start retrieval.
-            Can either be a string snapshot- or repository name when sorting by snapshot
-            or repository name, a millisecond time value or a number when sorting by
-            index- or shard count.
-        :param ignore_unavailable: If false, the request returns an error for any snapshots
+          <p>NOTE: The <code>after</code> parameter and <code>next</code> field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.
+          It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.
+          Snapshots concurrently created may be seen during an iteration.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get>`_
+
+        :param repository: A comma-separated list of snapshot repository names used to
+            limit the request. Wildcard (`*`) expressions are supported.
+        :param snapshot: A comma-separated list of snapshot names to retrieve. Wildcards
+            (`*`) are supported. * To get information about all snapshots in a registered
+            repository, use a wildcard (`*`) or `_all`. * To get information about any
+            snapshots that are currently running, use `_current`.
+        :param after: An offset identifier to start pagination from as returned by the
+            next field in the response body.
+        :param from_sort_value: The value of the current sort column at which to start
+            retrieval. It can be a snapshot name or a repository name when sorting by
+            snapshot or repository name, or a millisecond time value or a number when
+            sorting by index count or shard count.
+        :param ignore_unavailable: If `false`, the request returns an error for any snapshots
             that are unavailable.
-        :param include_repository: If true, returns the repository name in each snapshot.
-        :param index_details: If true, returns additional information about each index
-            in the snapshot comprising the number of shards in the index, the total size
-            of the index in bytes, and the maximum number of segments per shard in the
-            index. Defaults to false, meaning that this information is omitted.
-        :param index_names: If true, returns the name of each index in each snapshot.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
+        :param include_repository: If `true`, the response includes the repository name
+            in each snapshot.
+        :param index_details: If `true`, the response includes additional information
+            about each index in the snapshot comprising the number of shards in the index,
+            the total size of the index in bytes, and the maximum number of segments
+            per shard in the index. The default is `false`, meaning that this information
+            is omitted.
+        :param index_names: If `true`, the response includes the name of each index in
+            each snapshot.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
         :param offset: Numeric offset to start pagination from based on the snapshots
             matching this request. Using a non-zero value for this parameter is mutually
             exclusive with using the after parameter. Defaults to 0.
-        :param order: Sort order. Valid values are asc for ascending and desc for descending
-            order. Defaults to asc, meaning ascending order.
-        :param size: Maximum number of snapshots to return. Defaults to 0 which means
-            return all that match the request without limit.
-        :param slm_policy_filter: Filter snapshots by a comma-separated list of SLM policy
-            names that snapshots belong to. Also accepts wildcards (*) and combinations
-            of wildcards followed by exclude patterns starting with -. To include snapshots
-            not created by an SLM policy you can use the special pattern _none that will
-            match all snapshots without an SLM policy.
-        :param sort: Allows setting a sort order for the result. Defaults to start_time,
-            i.e. sorting by snapshot start time stamp.
-        :param verbose: If true, returns additional information about each snapshot such
-            as the version of Elasticsearch which took the snapshot, the start and end
-            times of the snapshot, and the number of shards snapshotted.
+        :param order: The sort order. Valid values are `asc` for ascending and `desc`
+            for descending order. The default behavior is ascending order.
+        :param size: The maximum number of snapshots to return. The default is 0, which
+            means to return all that match the request without limit.
+        :param slm_policy_filter: Filter snapshots by a comma-separated list of snapshot
+            lifecycle management (SLM) policy names that snapshots belong to. You can
+            use wildcards (`*`) and combinations of wildcards followed by exclude patterns
+            starting with `-`. For example, the pattern `*,-policy-a-\\*` will return
+            all snapshots except for those that were created by an SLM policy with a
+            name starting with `policy-a-`. Note that the wildcard pattern `*` matches
+            all snapshots created by an SLM policy but not those snapshots that were
+            not created by an SLM policy. To include snapshots that were not created
+            by an SLM policy, you can use the special pattern `_none` that will match
+            all snapshots without an SLM policy.
+        :param sort: The sort order for the result. The default behavior is sorting by
+            snapshot start time stamp.
+        :param state: Only return snapshots with a state found in the given comma-separated
+            list of snapshot states. The default is all snapshot states.
+        :param verbose: If `true`, returns additional information about each snapshot
+            such as the version of Elasticsearch which took the snapshot, the start and
+            end times of the snapshot, and the number of shards snapshotted. NOTE: The
+            parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`,
+            and `sort` are not supported when you set `verbose=false` and the sort order
+            for requests with `verbose=false` is undefined.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -586,6 +681,8 @@ class SnapshotClient(NamespacedClient):
             __query["slm_policy_filter"] = slm_policy_filter
         if sort is not None:
             __query["sort"] = sort
+        if state is not None:
+            __query["state"] = state
         if verbose is not None:
             __query["verbose"] = verbose
         __headers = {"accept": "application/json"}
@@ -616,12 +713,18 @@ class SnapshotClient(NamespacedClient):
           <p>Get snapshot repository information.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository>`_
 
-        :param name: A comma-separated list of repository names
-        :param local: Return local information, do not retrieve the state from master
-            node (default: false)
-        :param master_timeout: Explicit operation timeout for connection to master node
+        :param name: A comma-separated list of snapshot repository names used to limit
+            the request. Wildcard (`*`) expressions are supported including combining
+            wildcards with exclude patterns starting with `-`. To get information about
+            all snapshot repositories registered in the cluster, omit this parameter
+            or use `*` or `_all`.
+        :param local: If `true`, the request gets information from the local node only.
+            If `false`, the request gets information from the master node.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -678,20 +781,28 @@ class SnapshotClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Analyze a snapshot repository.
-          Analyze the performance characteristics and any incorrect behaviour found in a repository.</p>
-          <p>The response exposes implementation details of the analysis which may change from version to version.
-          The response body format is therefore not considered stable and may be different in newer versions.</p>
+          <p>Analyze a snapshot repository.</p>
+          <p>Performs operations on a snapshot repository in order to check for incorrect behaviour.</p>
           <p>There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch.
-          Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.</p>
+          Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do.
+          This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.</p>
           <p>The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations.
           Run your first analysis with the default parameter values to check for simple problems.
-          If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a <code>blob_count</code> of at least <code>2000</code>, a <code>max_blob_size</code> of at least <code>2gb</code>, a <code>max_total_data_size</code> of at least <code>1tb</code>, and a <code>register_operation_count</code> of at least <code>100</code>.
+          Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads.
+          If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a <code>blob_count</code> of at least <code>2000</code>, a <code>max_blob_size</code> of at least <code>2gb</code>, a <code>max_total_data_size</code> of at least <code>1tb</code>, and a <code>register_operation_count</code> of at least <code>100</code>.
           Always specify a generous timeout, possibly <code>1h</code> or longer, to allow time for each analysis to run to completion.
+          Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster.
           Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.</p>
           <p>If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly.
           This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support.
           If so, this storage system is not suitable for use as a snapshot repository.
+          Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system.
+          Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures.
+          However, your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis.
+          You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using.
+          For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis.
+          This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch.
+          Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol.
           You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.</p>
           <p>If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took.
           You can use this information to determine the performance of your storage system.
@@ -719,14 +830,17 @@ class SnapshotClient(NamespacedClient):
           This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself.
           You must ensure this load does not affect other users of these systems.
           Analyses respect the repository settings <code>max_snapshot_bytes_per_sec</code> and <code>max_restore_bytes_per_sec</code> if available and the cluster setting <code>indices.recovery.max_bytes_per_sec</code> which you can use to limit the bandwidth they consume.</p>
-          <p>NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.</p>
+          <p>NOTE: This API is intended for exploratory use by humans.
+          You should expect the request parameters and the response format to vary in future versions.
+          The response exposes implementation details of the analysis which may change from version to version.</p>
           <p>NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones.
           A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version.
           This indicates it behaves incorrectly in ways that the former version did not detect.
           You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.</p>
           <p>NOTE: This API may not work correctly in a mixed-version cluster.</p>
           <p><em>Implementation details</em></p>
-          <p>NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.</p>
+          <p>NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions.
+          The request parameters and response format depend on details of the implementation so may also be different in newer versions.</p>
           <p>The analysis comprises a number of blob-level tasks, as set by the <code>blob_count</code> parameter and a number of compare-and-exchange operations on linearizable registers, as set by the <code>register_operation_count</code> parameter.
           These tasks are distributed over the data and master-eligible nodes in the cluster for execution.</p>
           <p>For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.
@@ -754,39 +868,44 @@ class SnapshotClient(NamespacedClient):
           Some operations also verify the behavior on small blobs with sizes other than 8 bytes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/repo-analysis-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze>`_
 
         :param name: The name of the repository.
         :param blob_count: The total number of blobs to write to the repository during
-            the test. For realistic experiments, you should set it to at least `2000`.
+            the test. For realistic experiments, set this parameter to at least `2000`.
         :param concurrency: The number of operations to run concurrently during the test.
+            For realistic experiments, leave this parameter unset.
         :param detailed: Indicates whether to return detailed results, including timing
             information for every operation performed during the analysis. If false,
             it returns only a summary of the analysis.
         :param early_read_node_count: The number of nodes on which to perform an early
             read operation while writing each blob. Early read operations are only rarely
-            performed.
+            performed. For realistic experiments, leave this parameter unset.
         :param max_blob_size: The maximum size of a blob to be written during the test.
-            For realistic experiments, you should set it to at least `2gb`.
+            For realistic experiments, set this parameter to at least `2gb`.
         :param max_total_data_size: An upper limit on the total size of all the blobs
-            written during the test. For realistic experiments, you should set it to
+            written during the test. For realistic experiments, set this parameter to
             at least `1tb`.
         :param rare_action_probability: The probability of performing a rare action such
-            as an early read, an overwrite, or an aborted write on each blob.
+            as an early read, an overwrite, or an aborted write on each blob. For realistic
+            experiments, leave this parameter unset.
         :param rarely_abort_writes: Indicates whether to rarely cancel writes before
-            they complete.
+            they complete. For realistic experiments, leave this parameter unset.
         :param read_node_count: The number of nodes on which to read a blob after writing.
+            For realistic experiments, leave this parameter unset.
         :param register_operation_count: The minimum number of linearizable register
-            operations to perform in total. For realistic experiments, you should set
-            it to at least `100`.
+            operations to perform in total. For realistic experiments, set this parameter
+            to at least `100`.
         :param seed: The seed for the pseudo-random number generator used to generate
             the list of operations performed during the test. To repeat the same set
             of operations in multiple experiments, use the same seed in each experiment.
             Note that the operations are performed concurrently so might not always happen
-            in the same order on each run.
+            in the same order on each run. For realistic experiments, leave this parameter
+            unset.
         :param timeout: The period of time to wait for the test to complete. If no response
             is received before the timeout expires, the test is cancelled and returns
-            an error.
+            an error. For realistic experiments, set this parameter sufficiently long
+            to allow the test to complete.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -879,21 +998,39 @@ class SnapshotClient(NamespacedClient):
           It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.</p>
           <p>NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.</p>
           <p>NOTE: This API may not work correctly in a mixed-version cluster.</p>
+          <p>The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster.
+          For instance, by default it will only use at most half of the <code>snapshot_meta</code> threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool.
+          If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster.
+          For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.</p>
+          <p>The response exposes implementation details of the analysis which may change from version to version.
+          The response body format is therefore not considered stable and may be different in newer versions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity>`_
 
-        :param name: A repository name
-        :param blob_thread_pool_concurrency: Number of threads to use for reading blob
-            contents
-        :param index_snapshot_verification_concurrency: Number of snapshots to verify
-            concurrently within each index
-        :param index_verification_concurrency: Number of indices to verify concurrently
-        :param max_bytes_per_sec: Rate limit for individual blob verification
-        :param max_failed_shard_snapshots: Maximum permitted number of failed shard snapshots
-        :param meta_thread_pool_concurrency: Number of threads to use for reading metadata
-        :param snapshot_verification_concurrency: Number of snapshots to verify concurrently
-        :param verify_blob_contents: Whether to verify the contents of individual blobs
+        :param name: The name of the snapshot repository.
+        :param blob_thread_pool_concurrency: If `verify_blob_contents` is `true`, this
+            parameter specifies how many blobs to verify at once.
+        :param index_snapshot_verification_concurrency: The maximum number of index snapshots
+            to verify concurrently within each index verification.
+        :param index_verification_concurrency: The number of indices to verify concurrently.
+            The default behavior is to use the entire `snapshot_meta` thread pool.
+        :param max_bytes_per_sec: If `verify_blob_contents` is `true`, this parameter
+            specifies the maximum amount of data that Elasticsearch will read from the
+            repository every second.
+        :param max_failed_shard_snapshots: The number of shard snapshot failures to track
+            during integrity verification, in order to avoid excessive resource usage.
+            If your repository contains more than this number of shard snapshot failures,
+            the verification will fail.
+        :param meta_thread_pool_concurrency: The maximum number of snapshot metadata
+            operations to run concurrently. The default behavior is to use at most half
+            of the `snapshot_meta` thread pool at once.
+        :param snapshot_verification_concurrency: The number of snapshots to verify concurrently.
+            The default behavior is to use at most half of the `snapshot_meta` thread
+            pool at once.
+        :param verify_blob_contents: Indicates whether to verify the checksum of every
+            data blob in the repository. If this feature is enabled, Elasticsearch will
+            read the entire repository contents, which may be extremely slow and expensive.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -991,23 +1128,66 @@ class SnapshotClient(NamespacedClient):
           <p>If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore>`_
 
-        :param repository: A repository name
-        :param snapshot: A snapshot name
-        :param feature_states:
-        :param ignore_index_settings:
-        :param ignore_unavailable:
-        :param include_aliases:
-        :param include_global_state:
-        :param index_settings:
-        :param indices:
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param partial:
-        :param rename_pattern:
-        :param rename_replacement:
-        :param wait_for_completion: Should this request wait until the operation has
-            completed before returning
+        :param repository: The name of the repository to restore a snapshot from.
+        :param snapshot: The name of the snapshot to restore.
+        :param feature_states: The feature states to restore. If `include_global_state`
+            is `true`, the request restores all feature states in the snapshot by default.
+            If `include_global_state` is `false`, the request restores no feature states
+            by default. Note that specifying an empty array will result in the default
+            behavior. To restore no feature states, regardless of the `include_global_state`
+            value, specify an array containing only the value `none` (`["none"]`).
+        :param ignore_index_settings: The index settings to not restore from the snapshot.
+            You can't use this option to ignore `index.number_of_shards`. For data streams,
+            this option applies only to restored backing indices. New backing indices
+            are configured using the data stream's matching index template.
+        :param ignore_unavailable: If `true`, the request ignores any index or data stream
+            in indices that's missing from the snapshot. If `false`, the request returns
+            an error for any missing index or data stream.
+        :param include_aliases: If `true`, the request restores aliases for any restored
+            data streams and indices. If `false`, the request doesn’t restore aliases.
+        :param include_global_state: If `true`, restore the cluster state. The cluster
+            state includes: * Persistent cluster settings * Index templates * Legacy
+            index templates * Ingest pipelines * Index lifecycle management (ILM) policies
+            * Stored scripts * For snapshots taken after 7.12.0, feature states. If `include_global_state`
+            is `true`, the restore operation merges the legacy index templates in your
+            cluster with the templates contained in the snapshot, replacing any existing
+            ones whose name matches one in the snapshot. It completely removes all persistent
+            settings, non-legacy index templates, ingest pipelines, and ILM lifecycle
+            policies that exist in your cluster and replaces them with the corresponding
+            items from the snapshot. Use the `feature_states` parameter to configure
+            how feature states are restored. If `include_global_state` is `true` and
+            a snapshot was created without a global state then the restore request will
+            fail.
+        :param index_settings: Index settings to add or change in restored indices, including
+            backing indices. You can't use this option to change `index.number_of_shards`.
+            For data streams, this option applies only to restored backing indices. New
+            backing indices are configured using the data stream's matching index template.
+        :param indices: A comma-separated list of indices and data streams to restore.
+            It supports a multi-target syntax. The default behavior is all regular indices
+            and regular data streams in the snapshot. You can't use this parameter to
+            restore system indices or system data streams. Use `feature_states` instead.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param partial: If `false`, the entire restore operation will fail if one or
+            more indices included in the snapshot do not have all primary shards available.
+            If `true`, it allows restoring a partial snapshot of indices with unavailable
+            shards. Only shards that were successfully included in the snapshot will
+            be restored. All missing shards will be recreated as empty.
+        :param rename_pattern: A rename pattern to apply to restored data streams and
+            indices. Data streams and indices matching the rename pattern will be renamed
+            according to `rename_replacement`. The rename pattern is applied as defined
+            by the regular expression that supports referencing the original text, according
+            to the `appendReplacement` logic.
+        :param rename_replacement: The rename replacement string that is used with the
+            `rename_pattern`.
+        :param wait_for_completion: If `true`, the request returns a response when the
+            restore operation completes. The operation is complete when it finishes all
+            attempts to recover primary shards for restored indices. This applies even
+            if one or more of the recovery attempts fail. If `false`, the request returns
+            a response when the restore operation initializes.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -1085,9 +1265,17 @@ class SnapshotClient(NamespacedClient):
         .. raw:: html
 
           <p>Get the snapshot status.
-          Get a detailed description of the current state for each shard participating in the snapshot.
-          Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
+          Get a detailed description of the current state for each shard participating in the snapshot.</p>
+          <p>Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
           If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.</p>
+          <p>If you omit the <code>&lt;snapshot&gt;</code> request path parameter, the request retrieves information only for currently running snapshots.
+          This usage is preferred.
+          If needed, you can specify <code>&lt;repository&gt;</code> and <code>&lt;snapshot&gt;</code> to retrieve information for specific snapshots, even if they're not currently running.</p>
+          <p>Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster.
+          Loading the stats from the repository is an expensive operation (see the WARNING below).
+          Therefore the stats values for such shards will be -1 even though the &quot;stage&quot; value will be &quot;DONE&quot;, in order to minimize latency.
+          A &quot;description&quot; field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid.
+          Consequently, the total stats for the index will be less than expected due to the missing values from these shards.</p>
           <p>WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
           The API requires a read from the repository for each shard in each snapshot.
           For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).</p>
@@ -1095,13 +1283,18 @@ class SnapshotClient(NamespacedClient):
           These requests can also tax machine resources and, when using cloud storage, incur high processing costs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status>`_
 
-        :param repository: A repository name
-        :param snapshot: A comma-separated list of snapshot names
-        :param ignore_unavailable: Whether to ignore unavailable snapshots, defaults
-            to false which means a SnapshotMissingException is thrown
-        :param master_timeout: Explicit operation timeout for connection to master node
+        :param repository: The snapshot repository name used to limit the request. It
+            supports wildcards (`*`) if `<snapshot>` isn't specified.
+        :param snapshot: A comma-separated list of snapshots to retrieve status for.
+            The default is currently running snapshots. Wildcards (`*`) are not supported.
+        :param ignore_unavailable: If `false`, the request returns an error for any snapshots
+            that are unavailable. If `true`, the request ignores snapshots that are unavailable,
+            such as those that are corrupted or temporarily cannot be returned.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
         """
         __path_parts: t.Dict[str, str]
         if repository not in SKIP_IN_PATH and snapshot not in SKIP_IN_PATH:
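
As noted above, the preferred usage is to omit the snapshot parameter so that only currently running snapshots are reported; a minimal sketch with an assumed repository name:

resp = await client.snapshot.status(repository="my_repository")
for snap in resp["snapshots"]:
    print(snap["snapshot"], snap["state"])
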
@@ -1158,11 +1351,17 @@ class SnapshotClient(NamespacedClient):
           Check for common misconfigurations in a snapshot repository.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository>`_
 
-        :param name: A repository name
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param name: The name of the snapshot repository to verify.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
diff -pruN 8.17.2-2/elasticsearch/_async/client/sql.py 9.2.0-1/elasticsearch/_async/client/sql.py
--- 8.17.2-2/elasticsearch/_async/client/sql.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/sql.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class SqlClient(NamespacedClient):
           <p>Clear an SQL search cursor.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-sql-cursor-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor>`_
 
         :param cursor: Cursor to clear.
         """
@@ -99,7 +99,7 @@ class SqlClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-async-sql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async>`_
 
         :param id: The identifier for the search.
         """
@@ -150,7 +150,7 @@ class SqlClient(NamespacedClient):
           <p>If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async>`_
 
         :param id: The identifier for the search.
         :param delimiter: The separator for CSV results. The API supports this parameter
@@ -212,7 +212,7 @@ class SqlClient(NamespacedClient):
           Get the current status of an async SQL search or a stored synchronous SQL search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status>`_
 
         :param id: The identifier for the search.
         """
@@ -283,8 +283,9 @@ class SqlClient(NamespacedClient):
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         keep_on_completion: t.Optional[bool] = None,
         page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        params: t.Optional[t.Mapping[str, t.Any]] = None,
+        params: t.Optional[t.Sequence[t.Any]] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         query: t.Optional[str] = None,
         request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
@@ -301,7 +302,7 @@ class SqlClient(NamespacedClient):
           Run an SQL request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query>`_
 
         :param allow_partial_search_results: If `true`, the response has partial results
             when there are shard request timeouts or shard failures. If `false`, the
@@ -332,6 +333,10 @@ class SqlClient(NamespacedClient):
             is no longer available. Subsequent scroll requests prolong the lifetime of
             the scroll cursor by the duration of `page_timeout` in the scroll request.
         :param params: The values for parameters in the query.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless
+            only.
         :param query: The SQL query to run.
         :param request_timeout: The timeout before the request fails.
         :param runtime_mappings: One or more runtime fields for the search request. These
@@ -357,6 +362,8 @@ class SqlClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if not __body:
             if allow_partial_search_results is not None:
                 __body["allow_partial_search_results"] = allow_partial_search_results
@@ -427,7 +434,7 @@ class SqlClient(NamespacedClient):
           It accepts the same request body parameters as the SQL search API, excluding <code>cursor</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-translate-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate>`_
 
         :param query: The SQL query to run.
         :param fetch_size: The maximum number of rows (or entries) to return in one response.
diff -pruN 8.17.2-2/elasticsearch/_async/client/ssl.py 9.2.0-1/elasticsearch/_async/client/ssl.py
--- 8.17.2-2/elasticsearch/_async/client/ssl.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/ssl.py	2025-10-28 16:50:53.000000000 +0000
@@ -52,7 +52,7 @@ class SslClient(NamespacedClient):
           <p>If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-ssl.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ssl/certificates"
diff -pruN 8.17.2-2/elasticsearch/_async/client/streams.py 9.2.0-1/elasticsearch/_async/client/streams.py
--- 8.17.2-2/elasticsearch/_async/client/streams.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/streams.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,185 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+import typing as t
+
+from elastic_transport import ObjectApiResponse, TextApiResponse
+
+from ._base import NamespacedClient
+from .utils import (
+    Stability,
+    _rewrite_parameters,
+    _stability_warning,
+)
+
+
+class StreamsClient(NamespacedClient):
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def logs_disable(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
+        """
+        .. raw:: html
+
+          <p>Disable logs stream.</p>
+          <p>Turn off the logs stream feature for this cluster.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch#TODO>`_
+
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_streams/logs/_disable"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json,text/plain"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="streams.logs_disable",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def logs_enable(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
+        """
+        .. raw:: html
+
+          <p>Enable logs stream.</p>
+          <p>Turn on the logs stream feature for this cluster.</p>
+          <p>NOTE: To protect existing data, this feature can be turned on only if the
+          cluster does not have existing indices or data streams that match the pattern <code>logs|logs.*</code>.
+          If those indices or data streams exist, a <code>409 - Conflict</code> response and error is returned.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch#TODO>`_
+
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_streams/logs/_enable"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json,text/plain"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="streams.logs_enable",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    async def status(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get the status of streams.</p>
+          <p>Get the current status for all types of streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch#TODO>`_
+
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_streams/status"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="streams.status",
+            path_parts=__path_parts,
+        )
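
A sketch of the new experimental streams client (the calls may emit a stability warning; the timeouts are assumptions):

await client.streams.logs_enable(timeout="30s")   # 409 Conflict if logs|logs.* indices already exist
status = await client.streams.status()
print(status)
await client.streams.logs_disable(timeout="30s")
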
diff -pruN 8.17.2-2/elasticsearch/_async/client/synonyms.py 9.2.0-1/elasticsearch/_async/client/synonyms.py
--- 8.17.2-2/elasticsearch/_async/client/synonyms.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/synonyms.py	2025-10-28 16:50:53.000000000 +0000
@@ -53,7 +53,7 @@ class SynonymsClient(NamespacedClient):
           When the synonyms set is not used in analyzers, you will be able to delete it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym>`_
 
         :param id: The synonyms set identifier to delete.
         """
@@ -90,6 +90,7 @@ class SynonymsClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -98,10 +99,13 @@ class SynonymsClient(NamespacedClient):
           Delete a synonym rule from a synonym set.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-synonym-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule>`_
 
         :param set_id: The ID of the synonym set to update.
         :param rule_id: The ID of the synonym rule to delete.
+        :param refresh: If `true`, the request will refresh the analyzers with the deleted
+            synonym rule and wait for the new synonyms to be available before returning.
+            If `false`, analyzers will not be reloaded with the deleted synonym rule.
         """
         if set_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'set_id'")
@@ -121,6 +125,8 @@ class SynonymsClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -151,7 +157,7 @@ class SynonymsClient(NamespacedClient):
           <p>Get a synonym set.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym>`_
 
         :param id: The synonyms set identifier to retrieve.
         :param from_: The starting offset for query rules to retrieve.
@@ -202,7 +208,7 @@ class SynonymsClient(NamespacedClient):
           Get a synonym rule from a synonym set.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonym-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule>`_
 
         :param set_id: The ID of the synonym set to retrieve the synonym rule from.
         :param rule_id: The ID of the synonym rule to retrieve.
@@ -255,7 +261,7 @@ class SynonymsClient(NamespacedClient):
           Get a summary of all defined synonym sets.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym>`_
 
         :param from_: The starting offset for synonyms sets to retrieve.
         :param size: The maximum number of synonyms sets to retrieve.
@@ -299,6 +305,7 @@ class SynonymsClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -309,12 +316,16 @@ class SynonymsClient(NamespacedClient):
           If you need to manage more synonym rules, you can create multiple synonym sets.</p>
           <p>When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.
           This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.</p>
+          <p>For practical examples of how to create or update a synonyms set, refer to the External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym>`_
 
         :param id: The ID of the synonyms set to be created or updated.
         :param synonyms_set: The synonym rules definitions for the synonyms set.
+        :param refresh: If `true`, the request will refresh the analyzers with the new
+            synonyms set and wait for the new synonyms to be available before returning.
+            If `false`, analyzers will not be reloaded with the new synonym set.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
@@ -332,6 +343,8 @@ class SynonymsClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         if not __body:
             if synonyms_set is not None:
                 __body["synonyms_set"] = synonyms_set
@@ -359,6 +372,7 @@ class SynonymsClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -370,12 +384,15 @@ class SynonymsClient(NamespacedClient):
           <p>When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-synonym-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule>`_
 
         :param set_id: The ID of the synonym set.
         :param rule_id: The ID of the synonym rule to be updated or created.
         :param synonyms: The synonym rule information definition, which must be in Solr
             format.
+        :param refresh: If `true`, the request will refresh the analyzers with the new
+            synonym rule and wait for the new synonyms to be available before returning.
+            If `false`, analyzers will not be reloaded with the new synonym rule.
         """
         if set_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'set_id'")
@@ -398,6 +415,8 @@ class SynonymsClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         if not __body:
             if synonyms is not None:
                 __body["synonyms"] = synonyms
diff -pruN 8.17.2-2/elasticsearch/_async/client/tasks.py 9.2.0-1/elasticsearch/_async/client/tasks.py
--- 8.17.2-2/elasticsearch/_async/client/tasks.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/tasks.py	2025-10-28 16:50:53.000000000 +0000
@@ -36,7 +36,7 @@ class TasksClient(NamespacedClient):
     async def cancel(
         self,
         *,
-        task_id: t.Optional[t.Union[int, str]] = None,
+        task_id: t.Optional[str] = None,
         actions: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -60,7 +60,7 @@ class TasksClient(NamespacedClient):
           You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks>`_
 
         :param task_id: The task identifier.
         :param actions: A comma-separated list or wildcard expression of actions that
@@ -128,7 +128,7 @@ class TasksClient(NamespacedClient):
           <p>If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks>`_
 
         :param task_id: The task identifier.
         :param timeout: The period to wait for a response. If no response is received
@@ -176,7 +176,6 @@ class TasksClient(NamespacedClient):
             t.Union[str, t.Literal["nodes", "none", "parents"]]
         ] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         parent_task_id: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
@@ -239,7 +238,7 @@ class TasksClient(NamespacedClient):
           The <code>X-Opaque-Id</code> in the children <code>headers</code> is the child task of the task that was initiated by the REST request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks>`_
 
         :param actions: A comma-separated list or wildcard expression of actions used
             to limit the request. For example, you can use `cluster:*` to retrieve all
@@ -249,9 +248,6 @@ class TasksClient(NamespacedClient):
             other but is more costly to run.
         :param group_by: A key that is used to group tasks in the response. The task
             lists can be grouped either by nodes or by parent tasks.
-        :param master_timeout: The period to wait for a connection to the master node.
-            If no response is received before the timeout expires, the request fails
-            and returns an error.
         :param nodes: A comma-separated list of node IDs or names that is used to limit
             the returned information.
         :param parent_task_id: A parent task identifier that is used to limit returned
@@ -278,8 +274,6 @@ class TasksClient(NamespacedClient):
             __query["group_by"] = group_by
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if nodes is not None:
             __query["nodes"] = nodes
         if parent_task_id is not None:
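
A short sketch of the task APIs touched above (shown with the synchronous client, which mirrors this async one); note that `tasks.list` no longer accepts `master_timeout` and `task_id` is now typed as a plain string. The node/task ID below is a made-up example:

    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Group running tasks by parent task; `actions` accepts wildcard patterns.
    resp = es.tasks.list(group_by="parents", actions="*search*", detailed=True)
    print(resp)

    # task_id is a string of the form "<node_id>:<task_number>".
    # es.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345")
    # es.tasks.get(task_id="oTUltX4IQMOUUVeiohTt8A:12345", wait_for_completion=True)
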
diff -pruN 8.17.2-2/elasticsearch/_async/client/text_structure.py 9.2.0-1/elasticsearch/_async/client/text_structure.py
--- 8.17.2-2/elasticsearch/_async/client/text_structure.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/text_structure.py	2025-10-28 16:50:53.000000000 +0000
@@ -72,7 +72,7 @@ class TextStructureClient(NamespacedClie
           It helps determine why the returned structure was chosen.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-field-structure.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure>`_
 
         :param field: The field that should be analyzed.
         :param index: The name of the index that contains the analyzed field.
@@ -259,7 +259,7 @@ class TextStructureClient(NamespacedClie
           It helps determine why the returned structure was chosen.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-message-structure.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure>`_
 
         :param messages: The list of messages you want to analyze.
         :param column_names: If the format is `delimited`, you can specify the column
@@ -433,7 +433,7 @@ class TextStructureClient(NamespacedClie
           However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-structure.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure>`_
 
         :param text_files:
         :param charset: The text's character set. It must be a character set that is
@@ -620,7 +620,7 @@ class TextStructureClient(NamespacedClie
           The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-grok-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern>`_
 
         :param grok_pattern: The Grok pattern to run on the text.
         :param text: The lines of text to run the Grok pattern on.
diff -pruN 8.17.2-2/elasticsearch/_async/client/transform.py 9.2.0-1/elasticsearch/_async/client/transform.py
--- 8.17.2-2/elasticsearch/_async/client/transform.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/transform.py	2025-10-28 16:50:53.000000000 +0000
@@ -41,11 +41,10 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a transform.
-          Deletes a transform.</p>
+          <p>Delete a transform.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param delete_dest_index: If this value is true, the destination index is deleted
@@ -106,10 +105,10 @@ class TransformClient(NamespacedClient):
         .. raw:: html
 
           <p>Get transforms.
-          Retrieves configuration information for transforms.</p>
+          Get configuration information for transforms.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform>`_
 
         :param transform_id: Identifier for the transform. It can be a transform identifier
             or a wildcard expression. You can get information for all transforms by using
@@ -178,11 +177,11 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get transform stats.
-          Retrieves usage information for transforms.</p>
+          <p>Get transform stats.</p>
+          <p>Get usage information for transforms.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-transform-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats>`_
 
         :param transform_id: Identifier for the transform. It can be a transform identifier
             or a wildcard expression. You can get information for all transforms by using
@@ -270,7 +269,7 @@ class TransformClient(NamespacedClient):
           types of the source index and the transform aggregations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/preview-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform>`_
 
         :param transform_id: Identifier for the transform to preview. If you specify
             this path parameter, you cannot provide transform configuration details in
@@ -407,7 +406,7 @@ class TransformClient(NamespacedClient):
           give users any privileges on <code>.data-frame-internal*</code> indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform>`_
 
         :param transform_id: Identifier for the transform. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -503,17 +502,17 @@ class TransformClient(NamespacedClient):
         force: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Reset a transform.
-          Resets a transform.
-          Before you can reset it, you must stop it; alternatively, use the <code>force</code> query parameter.
+          <p>Reset a transform.</p>
+          <p>Before you can reset it, you must stop it; alternatively, use the <code>force</code> query parameter.
           If the destination index was created by the transform, it is deleted.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/reset-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform>`_
 
         :param transform_id: Identifier for the transform. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -521,6 +520,8 @@ class TransformClient(NamespacedClient):
         :param force: If this value is `true`, the transform is reset regardless of its
             current state. If it's `false`, the transform must be stopped before it can
             be reset.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         if transform_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'transform_id'")
@@ -537,6 +538,8 @@ class TransformClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -561,15 +564,15 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Schedule a transform to start now.
-          Instantly runs a transform to process data.</p>
-          <p>If you _schedule_now a transform, it will process the new data instantly,
-          without waiting for the configured frequency interval. After _schedule_now API is called,
-          the transform will be processed again at now + frequency unless _schedule_now API
+          <p>Schedule a transform to start now.</p>
+          <p>Instantly run a transform to process data.
+          If you run this API, the transform will process the new data instantly,
+          without waiting for the configured frequency interval. After the API is called,
+          the transform will be processed again at <code>now + frequency</code> unless the API
           is called again in the meantime.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/schedule-now-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param timeout: Controls the time to wait for the scheduling to take place
@@ -599,6 +602,66 @@ class TransformClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters()
+    async def set_upgrade_mode(
+        self,
+        *,
+        enabled: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Set upgrade_mode for transform indices.
+          Sets a cluster wide upgrade_mode setting that prepares transform
+          indices for an upgrade.
+          When upgrading your cluster, in some circumstances you must restart your
+          nodes and reindex your transform indices. In those circumstances,
+          there must be no transforms running. You can stop the transforms,
+          do the upgrade, then start all the transforms again. Alternatively,
+          you can use this API to temporarily halt tasks associated with the transforms
+          and prevent new transforms from starting. You can also use this API
+          during upgrades that do not require you to reindex your transform
+          indices, though stopping transforms is not a requirement in that case.
+          You can see the current value for the upgrade_mode setting by using the get
+          transform info API.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode>`_
+
+        :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts
+            all transform tasks and prohibits new transform tasks from starting.
+        :param timeout: The time to wait for the request to be completed.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_transform/set_upgrade_mode"
+        __query: t.Dict[str, t.Any] = {}
+        if enabled is not None:
+            __query["enabled"] = enabled
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="transform.set_upgrade_mode",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         parameter_aliases={"from": "from_"},
     )
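
A hedged sketch of the new transform `set_upgrade_mode` endpoint added above, shown with the synchronous client; the timeout values are illustrative:

    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Temporarily halt all transform tasks and block new ones before the
    # node restart / reindex described in the docstring above.
    es.transform.set_upgrade_mode(enabled=True, timeout="30s")

    # ... perform the upgrade work here ...

    # Return transforms to normal operation afterwards.
    es.transform.set_upgrade_mode(enabled=False, timeout="30s")
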
@@ -616,8 +679,7 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Start a transform.
-          Starts a transform.</p>
+          <p>Start a transform.</p>
           <p>When you start a transform, it creates the destination index if it does not already exist. The <code>number_of_shards</code> is
           set to <code>1</code> and the <code>auto_expand_replicas</code> is set to <code>0-1</code>. If it is a pivot transform, it deduces the mapping
           definitions for the destination index from the source indices and the transform aggregations. If fields in the
@@ -633,7 +695,7 @@ class TransformClient(NamespacedClient):
           destination indices, the transform fails when it attempts unauthorized operations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param from_: Restricts the set of transformed entities to those changed after
@@ -691,7 +753,7 @@ class TransformClient(NamespacedClient):
           Stops one or more transforms.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/stop-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform>`_
 
         :param transform_id: Identifier for the transform. To stop multiple transforms,
             use a comma-separated list or a wildcard expression. To stop all transforms,
@@ -793,7 +855,7 @@ class TransformClient(NamespacedClient):
           time of update and runs with those privileges.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param defer_validation: When true, deferrable validations are not run. This
@@ -874,8 +936,8 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Upgrade all transforms.
-          Transforms are compatible across minor versions and between supported major versions.
+          <p>Upgrade all transforms.</p>
+          <p>Transforms are compatible across minor versions and between supported major versions.
           However, over time, the format of transform configuration information may change.
           This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
           It also cleans up the internal data structures that store the transform state and checkpoints.
@@ -888,7 +950,7 @@ class TransformClient(NamespacedClient):
           You may want to perform a recent cluster backup prior to the upgrade.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/upgrade-transforms.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms>`_
 
         :param dry_run: When true, the request checks for updates but does not run them.
         :param timeout: Period to wait for a response. If no response is received before
diff -pruN 8.17.2-2/elasticsearch/_async/client/watcher.py 9.2.0-1/elasticsearch/_async/client/watcher.py
--- 8.17.2-2/elasticsearch/_async/client/watcher.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/watcher.py	2025-10-28 16:50:53.000000000 +0000
@@ -45,10 +45,11 @@ class WatcherClient(NamespacedClient):
           <p>IMPORTANT: If the specified watch is currently being executed, this API will return an error
           The reason for this behavior is to prevent overwriting the watch status from a watch execution.</p>
           <p>Acknowledging an action throttles further executions of that action until its <code>ack.state</code> is reset to <code>awaits_successful_execution</code>.
-          This happens when the condition of the watch is not met (the condition evaluates to false).</p>
+          This happens when the condition of the watch is not met (the condition evaluates to false).
+          To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-ack-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch>`_
 
         :param watch_id: The watch identifier.
         :param action_id: A comma-separated list of the action identifiers to acknowledge.
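
To illustrate the acknowledgement and throttling behaviour described above, a minimal sketch with a hypothetical watch and action ID (synchronous client shown):

    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Acknowledge one action: further executions of it are throttled until its
    # ack.state returns to awaits_successful_execution.
    es.watcher.ack_watch(watch_id="cluster_health_watch", action_id="email_admin")

    # Omitting action_id acknowledges all actions of the watch.
    es.watcher.ack_watch(watch_id="cluster_health_watch")
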
@@ -104,7 +105,7 @@ class WatcherClient(NamespacedClient):
           A watch can be either active or inactive.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-activate-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch>`_
 
         :param watch_id: The watch identifier.
         """
@@ -148,7 +149,7 @@ class WatcherClient(NamespacedClient):
           A watch can be either active or inactive.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-deactivate-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch>`_
 
         :param watch_id: The watch identifier.
         """
@@ -196,7 +197,7 @@ class WatcherClient(NamespacedClient):
           When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the <code>.watches</code> index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-delete-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch>`_
 
         :param id: The watch identifier.
         """
@@ -274,10 +275,11 @@ class WatcherClient(NamespacedClient):
           This serves as great tool for testing and debugging your watches prior to adding them to Watcher.</p>
           <p>When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.
           If your user is allowed to read index <code>a</code>, but not index <code>b</code>, then the exact same set of rules will apply during execution of a watch.</p>
-          <p>When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.</p>
+          <p>When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
+          Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-execute-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch>`_
 
         :param id: The watch identifier.
         :param action_modes: Determines how to handle the watch actions as part of the
@@ -365,7 +367,7 @@ class WatcherClient(NamespacedClient):
           Only a subset of settings are shown, for example <code>index.auto_expand_replicas</code> and <code>index.number_of_replicas</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -410,7 +412,7 @@ class WatcherClient(NamespacedClient):
           <p>Get a watch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-get-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch>`_
 
         :param id: The watch identifier.
         """
@@ -485,7 +487,7 @@ class WatcherClient(NamespacedClient):
           If the user is able to read index <code>a</code>, but not index <code>b</code>, the same will apply when the watch runs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-put-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch>`_
 
         :param id: The identifier for the watch.
         :param actions: The list of actions that will be run if the condition matches.
@@ -550,11 +552,7 @@ class WatcherClient(NamespacedClient):
                 __body["transform"] = transform
             if trigger is not None:
                 __body["trigger"] = trigger
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return await self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -579,7 +577,7 @@ class WatcherClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -598,7 +596,7 @@ class WatcherClient(NamespacedClient):
           <p>Note that only the <code>_id</code> and <code>metadata.*</code> fields are queryable or sortable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-query-watches.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches>`_
 
         :param from_: The offset from the first result to fetch. It must be non-negative.
         :param query: A query that filters the watches to be returned.
@@ -663,6 +661,7 @@ class WatcherClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -672,7 +671,9 @@ class WatcherClient(NamespacedClient):
           Start the Watcher service if it is not already running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-start.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_watcher/_start"
@@ -683,6 +684,8 @@ class WatcherClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -734,7 +737,7 @@ class WatcherClient(NamespacedClient):
           You retrieve more metrics by using the metric parameter.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats>`_
 
         :param metric: Defines which additional metrics are included in the response.
         :param emit_stacktraces: Defines whether stack traces are generated for each
@@ -785,7 +788,7 @@ class WatcherClient(NamespacedClient):
           Stop the Watcher service if it is running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stop.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop>`_
 
         :param master_timeout: The period to wait for the master node. If the master
             node is not available before the timeout expires, the request fails and returns
@@ -840,10 +843,13 @@ class WatcherClient(NamespacedClient):
           <p>Update Watcher index settings.
           Update settings for the Watcher internal index (<code>.watches</code>).
           Only a subset of settings can be modified.
-          This includes <code>index.auto_expand_replicas</code> and <code>index.number_of_replicas</code>.</p>
+          This includes <code>index.auto_expand_replicas</code>, <code>index.number_of_replicas</code>, <code>index.routing.allocation.exclude.*</code>,
+          <code>index.routing.allocation.include.*</code> and <code>index.routing.allocation.require.*</code>.
+          Modification of <code>index.routing.allocation.include._tier_preference</code> is an exception and is not allowed as the
+          Watcher shards must always be in the <code>data_content</code> tier.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings>`_
 
         :param index_auto_expand_replicas:
         :param index_number_of_replicas:
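
A small sketch of updating the Watcher index settings named in the expanded docstring above; whether a given setting is accepted still depends on the server-side allow-list, so treat the value as illustrative:

    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Only the documented subset of .watches index settings can be changed,
    # e.g. index.auto_expand_replicas / index.number_of_replicas and the
    # routing.allocation include/exclude/require filters listed above.
    es.watcher.update_settings(index_auto_expand_replicas="0-4")
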
diff -pruN 8.17.2-2/elasticsearch/_async/client/xpack.py 9.2.0-1/elasticsearch/_async/client/xpack.py
--- 8.17.2-2/elasticsearch/_async/client/xpack.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/client/xpack.py	2025-10-28 16:50:53.000000000 +0000
@@ -54,7 +54,7 @@ class XPackClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/info-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info>`_
 
         :param accept_enterprise: If this param is used it must be set to true
         :param categories: A comma-separated list of the information categories to include
@@ -103,7 +103,7 @@ class XPackClient(NamespacedClient):
           The API also provides some usage statistics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/usage-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
diff -pruN 8.17.2-2/elasticsearch/_async/helpers.py 9.2.0-1/elasticsearch/_async/helpers.py
--- 8.17.2-2/elasticsearch/_async/helpers.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_async/helpers.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,12 +33,18 @@ from typing import (
     Union,
 )
 
+import sniffio
+from anyio import create_memory_object_stream, create_task_group, move_on_after
+
 from ..exceptions import ApiError, NotFoundError, TransportError
 from ..helpers.actions import (
     _TYPE_BULK_ACTION,
     _TYPE_BULK_ACTION_BODY,
     _TYPE_BULK_ACTION_HEADER,
     _TYPE_BULK_ACTION_HEADER_AND_BODY,
+    _TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY,
+    _TYPE_BULK_ACTION_WITH_META,
+    BulkMeta,
     _ActionChunker,
     _process_bulk_chunk_error,
     _process_bulk_chunk_success,
@@ -53,10 +59,20 @@ logger = logging.getLogger("elasticsearc
 T = TypeVar("T")
 
 
+async def _sleep(seconds: float) -> None:
+    if sniffio.current_async_library() == "trio":
+        import trio
+
+        await trio.sleep(seconds)
+    else:
+        await asyncio.sleep(seconds)
+
+
 async def _chunk_actions(
-    actions: AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY],
+    actions: AsyncIterable[_TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY],
     chunk_size: int,
     max_chunk_bytes: int,
+    flush_after_seconds: Optional[float],
     serializer: Serializer,
 ) -> AsyncIterable[
     Tuple[
@@ -76,10 +92,46 @@ async def _chunk_actions(
     chunker = _ActionChunker(
         chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer
     )
-    async for action, data in actions:
-        ret = chunker.feed(action, data)
-        if ret:
-            yield ret
+
+    action: _TYPE_BULK_ACTION_WITH_META
+    data: _TYPE_BULK_ACTION_BODY
+    if not flush_after_seconds:
+        async for action, data in actions:
+            ret = chunker.feed(action, data)
+            if ret:
+                yield ret
+    else:
+        sender, receiver = create_memory_object_stream[
+            _TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY
+        ]()
+
+        async def get_items() -> None:
+            try:
+                async for item in actions:
+                    await sender.send(item)
+            finally:
+                await sender.send((BulkMeta.done, None))
+
+        async with create_task_group() as tg:
+            tg.start_soon(get_items)
+
+            timeout: Optional[float] = flush_after_seconds
+            while True:
+                action = {}
+                data = None
+                with move_on_after(timeout) as scope:
+                    action, data = await receiver.receive()
+                    timeout = flush_after_seconds
+                if scope.cancelled_caught:
+                    action, data = BulkMeta.flush, None
+                    timeout = None
+
+                if action is BulkMeta.done:
+                    break
+                ret = chunker.feed(action, data)
+                if ret:
+                    yield ret
+
     ret = chunker.flush()
     if ret:
         yield ret
@@ -136,7 +188,6 @@ def aiter(x: Union[Iterable[T], AsyncIte
         return x.__aiter__()
 
     async def f() -> AsyncIterable[T]:
-        nonlocal x
         ix: Iterable[T] = x
         for item in ix:
             yield item
@@ -160,9 +211,13 @@ async def azip(
 
 async def async_streaming_bulk(
     client: AsyncElasticsearch,
-    actions: Union[Iterable[_TYPE_BULK_ACTION], AsyncIterable[_TYPE_BULK_ACTION]],
+    actions: Union[
+        Iterable[_TYPE_BULK_ACTION_WITH_META],
+        AsyncIterable[_TYPE_BULK_ACTION_WITH_META],
+    ],
     chunk_size: int = 500,
     max_chunk_bytes: int = 100 * 1024 * 1024,
+    flush_after_seconds: Optional[float] = None,
     raise_on_error: bool = True,
     expand_action_callback: Callable[
         [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY
@@ -195,6 +250,9 @@ async def async_streaming_bulk(
     :arg actions: iterable or async iterable containing the actions to be executed
     :arg chunk_size: number of docs in one chunk sent to es (default: 500)
     :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg flush_after_seconds: time in seconds after which a chunk is written even
+        if it hasn't reached `chunk_size` or `max_chunk_bytes`. Set to 0 to not use a
+        timeout-based flush. (default: 0)
     :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
         from the execution of the last chunk when some occur. By default we raise.
     :arg raise_on_exception: if ``False`` then don't propagate exceptions from
@@ -221,9 +279,14 @@ async def async_streaming_bulk(
     if isinstance(retry_on_status, int):
         retry_on_status = (retry_on_status,)
 
-    async def map_actions() -> AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY]:
+    async def map_actions() -> (
+        AsyncIterable[_TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY]
+    ):
         async for item in aiter(actions):
-            yield expand_action_callback(item)
+            if isinstance(item, BulkMeta):
+                yield item, None
+            else:
+                yield expand_action_callback(item)
 
     serializer = client.transport.serializers.get_serializer("application/json")
 
@@ -235,7 +298,7 @@ async def async_streaming_bulk(
     ]
     bulk_actions: List[bytes]
     async for bulk_data, bulk_actions in _chunk_actions(
-        map_actions(), chunk_size, max_chunk_bytes, serializer
+        map_actions(), chunk_size, max_chunk_bytes, flush_after_seconds, serializer
     ):
         for attempt in range(max_retries + 1):
             to_retry: List[bytes] = []
@@ -246,9 +309,7 @@ async def async_streaming_bulk(
                 ]
             ] = []
             if attempt:
-                await asyncio.sleep(
-                    min(max_backoff, initial_backoff * 2 ** (attempt - 1))
-                )
+                await _sleep(min(max_backoff, initial_backoff * 2 ** (attempt - 1)))
 
             try:
                 data: Union[
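
A sketch of the new `flush_after_seconds` option on `async_streaming_bulk`; the index name, URL, and timings are assumptions. Per the implementation above, a partially filled chunk is flushed once no new action has arrived within the given number of seconds:

    import asyncio

    from elasticsearch import AsyncElasticsearch
    from elasticsearch.helpers import async_streaming_bulk


    async def slow_docs():
        # A producer that occasionally pauses for longer than the flush timeout.
        for i in range(20):
            yield {"_index": "my-index", "_id": i, "value": i}
            if i % 5 == 4:
                await asyncio.sleep(2)


    async def main() -> None:
        es = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        async for ok, item in async_streaming_bulk(
            es,
            slow_docs(),
            chunk_size=500,
            flush_after_seconds=0.5,  # new option introduced by the diff above
        ):
            if not ok:
                print("failed:", item)
        await es.close()


    asyncio.run(main())
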
diff -pruN 8.17.2-2/elasticsearch/_otel.py 9.2.0-1/elasticsearch/_otel.py
--- 8.17.2-2/elasticsearch/_otel.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_otel.py	2025-10-28 16:50:53.000000000 +0000
@@ -67,7 +67,7 @@ class OpenTelemetry:
         *,
         endpoint_id: str | None,
         path_parts: Mapping[str, str],
-    ) -> Generator[OpenTelemetrySpan, None, None]:
+    ) -> Generator[OpenTelemetrySpan]:
         if not self.enabled or self.tracer is None:
             yield OpenTelemetrySpan(None)
             return
@@ -75,11 +75,11 @@ class OpenTelemetry:
         span_name = endpoint_id or method
         with self.tracer.start_as_current_span(span_name) as otel_span:
             otel_span.set_attribute("http.request.method", method)
-            otel_span.set_attribute("db.system", "elasticsearch")
+            otel_span.set_attribute("db.system.name", "elasticsearch")
             if endpoint_id is not None:
-                otel_span.set_attribute("db.operation", endpoint_id)
+                otel_span.set_attribute("db.operation.name", endpoint_id)
             for key, value in path_parts.items():
-                otel_span.set_attribute(f"db.elasticsearch.path_parts.{key}", value)
+                otel_span.set_attribute(f"db.operation.parameter.{key}", value)
 
             yield OpenTelemetrySpan(
                 otel_span,
@@ -88,20 +88,20 @@ class OpenTelemetry:
             )
 
     @contextlib.contextmanager
-    def helpers_span(self, span_name: str) -> Generator[OpenTelemetrySpan, None, None]:
+    def helpers_span(self, span_name: str) -> Generator[OpenTelemetrySpan]:
         if not self.enabled or self.tracer is None:
             yield OpenTelemetrySpan(None)
             return
 
         with self.tracer.start_as_current_span(span_name) as otel_span:
-            otel_span.set_attribute("db.system", "elasticsearch")
-            otel_span.set_attribute("db.operation", span_name)
+            otel_span.set_attribute("db.system.name", "elasticsearch")
+            otel_span.set_attribute("db.operation.name", span_name)
             # Without a request method, Elastic APM does not display the traces
             otel_span.set_attribute("http.request.method", "null")
             yield OpenTelemetrySpan(otel_span)
 
     @contextlib.contextmanager
-    def use_span(self, span: OpenTelemetrySpan) -> Generator[None, None, None]:
+    def use_span(self, span: OpenTelemetrySpan) -> Generator[None]:
         if not self.enabled or self.tracer is None or span.otel_span is None:
             yield
             return
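
The attribute names above follow the newer OpenTelemetry database semantic conventions (`db.system.name`, `db.operation.name`, `db.operation.parameter.*`). A hedged sketch, assuming the client's OpenTelemetry instrumentation is active and an OpenTelemetry SDK is installed, of inspecting those attributes with the in-memory exporter:

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

    from elasticsearch import Elasticsearch

    # Register an in-memory exporter before the client is created so the
    # client's tracer uses this provider.
    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))
    trace.set_tracer_provider(provider)

    es = Elasticsearch("http://localhost:9200")  # assumed local cluster
    es.indices.exists(index="my-index")

    for span in exporter.get_finished_spans():
        # Expect attributes such as db.system.name="elasticsearch",
        # db.operation.name="indices.exists", db.operation.parameter.index="my-index".
        print(span.name, dict(span.attributes))
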
diff -pruN 8.17.2-2/elasticsearch/_sync/client/__init__.py 9.2.0-1/elasticsearch/_sync/client/__init__.py
--- 8.17.2-2/elasticsearch/_sync/client/__init__.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -18,7 +18,6 @@
 
 import logging
 import typing as t
-import warnings
 
 from elastic_transport import (
     BaseNode,
@@ -64,6 +63,7 @@ from .migration import MigrationClient
 from .ml import MlClient
 from .monitoring import MonitoringClient
 from .nodes import NodesClient
+from .project import ProjectClient
 from .query_rules import QueryRulesClient
 from .rollup import RollupClient
 from .search_application import SearchApplicationClient
@@ -75,6 +75,7 @@ from .slm import SlmClient
 from .snapshot import SnapshotClient
 from .sql import SqlClient
 from .ssl import SslClient
+from .streams import StreamsClient
 from .synonyms import SynonymsClient
 from .tasks import TasksClient
 from .text_structure import TextStructureClient
@@ -181,37 +182,13 @@ class Elasticsearch(BaseClient):
             t.Callable[[t.Dict[str, t.Any], NodeConfig], t.Optional[NodeConfig]]
         ] = None,
         meta_header: t.Union[DefaultType, bool] = DEFAULT,
-        timeout: t.Union[DefaultType, None, float] = DEFAULT,
-        randomize_hosts: t.Union[DefaultType, bool] = DEFAULT,
-        host_info_callback: t.Optional[
-            t.Callable[
-                [t.Dict[str, t.Any], t.Dict[str, t.Union[str, int]]],
-                t.Optional[t.Dict[str, t.Union[str, int]]],
-            ]
-        ] = None,
-        sniffer_timeout: t.Union[DefaultType, None, float] = DEFAULT,
-        sniff_on_connection_fail: t.Union[DefaultType, bool] = DEFAULT,
         http_auth: t.Union[DefaultType, t.Any] = DEFAULT,
-        maxsize: t.Union[DefaultType, int] = DEFAULT,
         # Internal use only
         _transport: t.Optional[Transport] = None,
     ) -> None:
         if hosts is None and cloud_id is None and _transport is None:
             raise ValueError("Either 'hosts' or 'cloud_id' must be specified")
 
-        if timeout is not DEFAULT:
-            if request_timeout is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'timeout' and 'request_timeout', "
-                    "instead only specify 'request_timeout'"
-                )
-            warnings.warn(
-                "The 'timeout' parameter is deprecated in favor of 'request_timeout'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            request_timeout = timeout
-
         if serializer is not None:
             if serializers is not DEFAULT:
                 raise ValueError(
@@ -220,58 +197,6 @@ class Elasticsearch(BaseClient):
                 )
             serializers = {default_mimetype: serializer}
 
-        if randomize_hosts is not DEFAULT:
-            if randomize_nodes_in_pool is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'randomize_hosts' and 'randomize_nodes_in_pool', "
-                    "instead only specify 'randomize_nodes_in_pool'"
-                )
-            warnings.warn(
-                "The 'randomize_hosts' parameter is deprecated in favor of 'randomize_nodes_in_pool'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            randomize_nodes_in_pool = randomize_hosts
-
-        if sniffer_timeout is not DEFAULT:
-            if min_delay_between_sniffing is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'sniffer_timeout' and 'min_delay_between_sniffing', "
-                    "instead only specify 'min_delay_between_sniffing'"
-                )
-            warnings.warn(
-                "The 'sniffer_timeout' parameter is deprecated in favor of 'min_delay_between_sniffing'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            min_delay_between_sniffing = sniffer_timeout
-
-        if sniff_on_connection_fail is not DEFAULT:
-            if sniff_on_node_failure is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'sniff_on_connection_fail' and 'sniff_on_node_failure', "
-                    "instead only specify 'sniff_on_node_failure'"
-                )
-            warnings.warn(
-                "The 'sniff_on_connection_fail' parameter is deprecated in favor of 'sniff_on_node_failure'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            sniff_on_node_failure = sniff_on_connection_fail
-
-        if maxsize is not DEFAULT:
-            if connections_per_node is not DEFAULT:
-                raise ValueError(
-                    "Can't specify both 'maxsize' and 'connections_per_node', "
-                    "instead only specify 'connections_per_node'"
-                )
-            warnings.warn(
-                "The 'maxsize' parameter is deprecated in favor of 'connections_per_node'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-            connections_per_node = maxsize
-
         # Setting min_delay_between_sniffing=True implies sniff_before_requests=True
         if min_delay_between_sniffing is not DEFAULT:
             sniff_before_requests = True
@@ -293,22 +218,7 @@ class Elasticsearch(BaseClient):
             )
 
         sniff_callback = None
-        if host_info_callback is not None:
-            if sniffed_node_callback is not None:
-                raise ValueError(
-                    "Can't specify both 'host_info_callback' and 'sniffed_node_callback', "
-                    "instead only specify 'sniffed_node_callback'"
-                )
-            warnings.warn(
-                "The 'host_info_callback' parameter is deprecated in favor of 'sniffed_node_callback'",
-                category=DeprecationWarning,
-                stacklevel=2,
-            )
-
-            sniff_callback = create_sniff_callback(
-                host_info_callback=host_info_callback
-            )
-        elif sniffed_node_callback is not None:
+        if sniffed_node_callback is not None:
             sniff_callback = create_sniff_callback(
                 sniffed_node_callback=sniffed_node_callback
             )
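
Since the deprecated 8.x constructor aliases are removed above, passing them is no longer accepted; a migration sketch using the replacement names (host URL and values are illustrative):

    from elasticsearch import Elasticsearch

    es = Elasticsearch(
        "http://localhost:9200",           # assumed local cluster
        request_timeout=10,                # was: timeout=10
        randomize_nodes_in_pool=True,      # was: randomize_hosts=True
        min_delay_between_sniffing=60.0,   # was: sniffer_timeout=60
        sniff_on_node_failure=True,        # was: sniff_on_connection_fail=True
        connections_per_node=25,           # was: maxsize=25
        # host_info_callback=... is likewise replaced by sniffed_node_callback=...
    )
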
@@ -460,6 +370,7 @@ class Elasticsearch(BaseClient):
         self.migration = MigrationClient(self)
         self.ml = MlClient(self)
         self.monitoring = MonitoringClient(self)
+        self.project = ProjectClient(self)
         self.query_rules = QueryRulesClient(self)
         self.rollup = RollupClient(self)
         self.search_application = SearchApplicationClient(self)
@@ -470,6 +381,7 @@ class Elasticsearch(BaseClient):
         self.shutdown = ShutdownClient(self)
         self.sql = SqlClient(self)
         self.ssl = SslClient(self)
+        self.streams = StreamsClient(self)
         self.synonyms = SynonymsClient(self)
         self.text_structure = TextStructureClient(self)
         self.transform = TransformClient(self)
@@ -626,6 +538,7 @@ class Elasticsearch(BaseClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        include_source_on_error: t.Optional[bool] = None,
         list_executed_pipelines: t.Optional[bool] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
@@ -697,6 +610,7 @@ class Elasticsearch(BaseClient):
           <li>JavaScript: Check out <code>client.helpers.*</code></li>
           <li>.NET: Check out <code>BulkAllObservable</code></li>
           <li>PHP: Check out bulk indexing.</li>
+          <li>Ruby: Check out <code>Elasticsearch::Helpers::BulkHelper</code></li>
           </ul>
           <p><strong>Submitting bulk requests with cURL</strong></p>
           <p>If you're providing text file input to <code>curl</code>, you must use the <code>--data-binary</code> flag instead of plain <code>-d</code>.
@@ -726,13 +640,17 @@ class Elasticsearch(BaseClient):
           Imagine a <code>_bulk?refresh=wait_for</code> request with three documents in it that happen to be routed to different shards in an index with five shards.
           The request will only wait for those three shards to refresh.
           The other two shards that make up the index do not participate in the <code>_bulk</code> request at all.</p>
+          <p>You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests.
+          Refer to the linked documentation for step-by-step instructions using the index settings API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-bulk.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk>`_
 
         :param operations:
         :param index: The name of the data stream, index, or index alias to perform bulk
             actions on.
+        :param include_source_on_error: Whether to include the document source
+            in the error message in case of parsing errors.
         :param list_executed_pipelines: If `true`, the response will include the ingest
             pipelines that were run for each index or create.
         :param pipeline: The pipeline identifier to use to preprocess incoming documents.
@@ -790,6 +708,8 @@ class Elasticsearch(BaseClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if list_executed_pipelines is not None:
             __query["list_executed_pipelines"] = list_executed_pipelines
         if pipeline is not None:
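
A minimal sketch of the new `include_source_on_error` flag on the bulk API (synchronous client shown; index name and documents are illustrative):

    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # With include_source_on_error=False, error items for documents that fail
    # to parse do not echo the offending document source back in the response.
    resp = es.bulk(
        operations=[
            {"index": {"_index": "my-index", "_id": "1"}},
            {"price": "not-a-number"},
        ],
        include_source_on_error=False,
        refresh=True,
    )
    print(resp["errors"])
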
@@ -849,7 +769,7 @@ class Elasticsearch(BaseClient):
           Clear the search context and results for a scrolling search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-scroll-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll>`_
 
         :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`.
         """
@@ -906,7 +826,7 @@ class Elasticsearch(BaseClient):
           However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/point-in-time-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time>`_
 
         :param id: The ID of the point-in-time.
         """
@@ -927,11 +847,7 @@ class Elasticsearch(BaseClient):
         if not __body:
             if id is not None:
                 __body["id"] = id
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "DELETE",
             __path,
@@ -971,6 +887,7 @@ class Elasticsearch(BaseClient):
         min_score: t.Optional[float] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         q: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         routing: t.Optional[str] = None,
@@ -990,7 +907,7 @@ class Elasticsearch(BaseClient):
           This means that replicas increase the scalability of the count.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-count.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams and indices,
@@ -1004,8 +921,8 @@ class Elasticsearch(BaseClient):
             This parameter can be used only when the `q` query string parameter is specified.
         :param analyzer: The analyzer to use for the query string. This parameter can
             be used only when the `q` query string parameter is specified.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as a default when no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -1025,6 +942,10 @@ class Elasticsearch(BaseClient):
             in the result.
         :param preference: The node or shard the operation should be performed on. By
             default, it is random.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param q: The query in Lucene query string syntax. This parameter cannot be used
             with a request body.
         :param query: Defines the search query using Query DSL. A request body query
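
A hedged sketch of the new `project_routing` parameter on the count API, which the docstring above marks as serverless-only; the alias expression is an assumption:

    from elasticsearch import Elasticsearch

    es = Elasticsearch("http://localhost:9200")  # illustrative endpoint

    # Count only within the origin project; other projects can be addressed
    # with _alias:<name> or a wildcarded value such as _alias:*prod*.
    resp = es.count(index="logs-*", project_routing="_alias:_origin")
    print(resp["count"])
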
@@ -1077,6 +998,8 @@ class Elasticsearch(BaseClient):
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if q is not None:
             __query["q"] = q
         if routing is not None:
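
The two hunks above document and wire up the new `project_routing` query parameter for the count API. A minimal sketch of a call that uses it, assuming a client pointed at a local cluster and a hypothetical index and project alias (`project_routing` is documented as serverless-only):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Count documents matching a query; project_routing narrows the request to
    # projects whose _alias tag matches (serverless only, per the docstring above).
    resp = client.count(
        index="my-index",                        # hypothetical index
        query={"term": {"user.id": "kimchy"}},
        project_routing="_alias:my-project",
    )
    print(resp["count"])
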
@@ -1114,16 +1037,19 @@ class Elasticsearch(BaseClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        include_source_on_error: t.Optional[bool] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         refresh: t.Optional[
             t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
         ] = None,
+        require_alias: t.Optional[bool] = None,
+        require_data_stream: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
@@ -1163,7 +1089,7 @@ class Elasticsearch(BaseClient):
           This does come at the (very minimal) cost of an additional document parsing pass.
           If the <code>_routing</code> mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.</p>
           <p>NOTE: Data streams do not support custom routing unless they were created with the <code>allow_custom_routing</code> setting enabled in the template.</p>
-          <p>** Distributed**</p>
+          <p><strong>Distributed</strong></p>
           <p>The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
           After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.</p>
           <p><strong>Active shards</strong></p>
@@ -1186,7 +1112,7 @@ class Elasticsearch(BaseClient):
           The <code>_shards</code> section of the API response reveals the number of shard copies on which replication succeeded and failed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-index_.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create>`_
 
         :param index: The name of the data stream or index to target. If the target doesn't
             exist and matches the name or wildcard (`*`) pattern of an index template
@@ -1196,6 +1122,8 @@ class Elasticsearch(BaseClient):
         :param id: A unique identifier for the document. To automatically generate a
             document ID, use the `POST /<target>/_doc/` request format.
         :param document:
+        :param include_source_on_error: Whether to include the document source
+            in the error message in case of parsing errors.
         :param pipeline: The ID of the pipeline to use to preprocess incoming documents.
             If the index has a default ingest pipeline specified, setting the value to
             `_none` turns off the default ingest pipeline for this request. If a final
@@ -1204,6 +1132,9 @@ class Elasticsearch(BaseClient):
         :param refresh: If `true`, Elasticsearch refreshes the affected shards to make
             this operation visible to search. If `wait_for`, it waits for a refresh to
             make this operation visible to search. If `false`, it does nothing with refreshes.
+        :param require_alias: If `true`, the destination must be an index alias.
+        :param require_data_stream: If `true`, the request's actions must target a data
+            stream (existing or to be created).
         :param routing: A custom value that is used to route operations to a specific
             shard.
         :param timeout: The period the request waits for the following operations: automatic
@@ -1244,12 +1175,18 @@ class Elasticsearch(BaseClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if pipeline is not None:
             __query["pipeline"] = pipeline
         if pretty is not None:
             __query["pretty"] = pretty
         if refresh is not None:
             __query["refresh"] = refresh
+        if require_alias is not None:
+            __query["require_alias"] = require_alias
+        if require_data_stream is not None:
+            __query["require_data_stream"] = require_data_stream
         if routing is not None:
             __query["routing"] = routing
         if timeout is not None:
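
The hunks above add `include_source_on_error`, `require_alias`, and `require_data_stream` to this document-creation endpoint. A minimal sketch of a write that sets one of the new flags, assuming this is the client's `create` method (per the operation-create link above); the index name, document, and local cluster URL are hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.create(
        index="my-index",                 # hypothetical index
        id="1",
        document={"title": "hello", "created": "2025-01-01"},
        refresh="wait_for",               # block until the document is searchable
        include_source_on_error=False,    # keep the source out of parsing-error messages
    )
    print(resp["result"])                 # e.g. "created"
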
@@ -1291,7 +1228,7 @@ class Elasticsearch(BaseClient):
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
@@ -1326,7 +1263,7 @@ class Elasticsearch(BaseClient):
           It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-delete.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete>`_
 
         :param index: The name of the target index.
         :param id: A unique identifier for the document.
@@ -1395,7 +1332,7 @@ class Elasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("max_docs", "query", "slice"),
+        body_fields=("max_docs", "query", "slice", "sort"),
         parameter_aliases={"from": "from_"},
     )
     def delete_by_query(
@@ -1439,7 +1376,12 @@ class Elasticsearch(BaseClient):
         ] = None,
         slice: t.Optional[t.Mapping[str, t.Any]] = None,
         slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
-        sort: t.Optional[t.Sequence[str]] = None,
+        sort: t.Optional[
+            t.Union[
+                t.Sequence[t.Union[str, t.Mapping[str, t.Any]]],
+                t.Union[str, t.Mapping[str, t.Any]],
+            ]
+        ] = None,
         stats: t.Optional[t.Sequence[str]] = None,
         terminate_after: t.Optional[int] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
@@ -1515,7 +1457,7 @@ class Elasticsearch(BaseClient):
           The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-delete-by-query.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams or indices,
@@ -1531,8 +1473,8 @@ class Elasticsearch(BaseClient):
             used only when the `q` query string parameter is specified.
         :param conflicts: What to do if delete by query hits version conflicts: `abort`
             or `proceed`.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as default where no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -1541,7 +1483,7 @@ class Elasticsearch(BaseClient):
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
             values, such as `open,hidden`.
-        :param from_: Starting offset (default: 0)
+        :param from_: Skips the specified number of documents.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param lenient: If `true`, format-based query failures (such as providing text
@@ -1571,7 +1513,7 @@ class Elasticsearch(BaseClient):
         :param slice: Slice the request manually using the provided slice ID and total
             number of slices.
         :param slices: The number of slices this task should be divided into.
-        :param sort: A comma-separated list of `<field>:<direction>` pairs.
+        :param sort: A sort object that specifies the order of deleted documents.
         :param stats: The specific `tag` of the request for logging and statistical purposes.
         :param terminate_after: The maximum number of documents to collect for each shard.
             If a query reaches this limit, Elasticsearch terminates the query early.
@@ -1661,8 +1603,6 @@ class Elasticsearch(BaseClient):
             __query["search_type"] = search_type
         if slices is not None:
             __query["slices"] = slices
-        if sort is not None:
-            __query["sort"] = sort
         if stats is not None:
             __query["stats"] = stats
         if terminate_after is not None:
@@ -1682,6 +1622,8 @@ class Elasticsearch(BaseClient):
                 __body["query"] = query
             if slice is not None:
                 __body["slice"] = slice
+            if sort is not None:
+                __body["sort"] = sort
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -1697,7 +1639,7 @@ class Elasticsearch(BaseClient):
     def delete_by_query_rethrottle(
         self,
         *,
-        task_id: t.Union[int, str],
+        task_id: str,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -1712,7 +1654,7 @@ class Elasticsearch(BaseClient):
           Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-delete-by-query.html#docs-delete-by-query-rethrottle>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle>`_
 
         :param task_id: The ID for the task.
         :param requests_per_second: The throttle for this request in sub-requests per
@@ -1762,7 +1704,7 @@ class Elasticsearch(BaseClient):
           Deletes a stored script or search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-stored-script-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script>`_
 
         :param id: The identifier for the stored script or search template.
         :param master_timeout: The period to wait for a connection to the master node.
@@ -1826,7 +1768,7 @@ class Elasticsearch(BaseClient):
         stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> HeadApiResponse:
         """
@@ -1846,7 +1788,7 @@ class Elasticsearch(BaseClient):
           Elasticsearch cleans up deleted documents in the background as you continue to index more data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases. It
             supports wildcards (`*`).
@@ -1955,7 +1897,7 @@ class Elasticsearch(BaseClient):
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> HeadApiResponse:
         """
@@ -1969,7 +1911,7 @@ class Elasticsearch(BaseClient):
           <p>A document's source is not available if it is disabled in the mapping.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases. It
             supports wildcards (`*`).
@@ -2075,7 +2017,7 @@ class Elasticsearch(BaseClient):
           It computes a score explanation for a query and a specific document.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-explain.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain>`_
 
         :param index: Index names that are used to limit the request. Only a single index
             name can be provided to this parameter.
@@ -2084,8 +2026,8 @@ class Elasticsearch(BaseClient):
             This parameter can be used only when the `q` query string parameter is specified.
         :param analyzer: The analyzer to use for the query string. This parameter can
             be used only when the `q` query string parameter is specified.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as default where no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -2196,6 +2138,7 @@ class Elasticsearch(BaseClient):
         include_unmapped: t.Optional[bool] = None,
         index_filter: t.Optional[t.Mapping[str, t.Any]] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         types: t.Optional[t.Sequence[str]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
@@ -2210,7 +2153,7 @@ class Elasticsearch(BaseClient):
           For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the <code>keyword</code> family.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-field-caps.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (*). To target all data streams
@@ -2239,6 +2182,11 @@ class Elasticsearch(BaseClient):
             deleted documents) are outside of the provided range. However, not all queries
             can rewrite to `match_none` so this API may return an index even if the provided
             filter matches no document.
+        :param project_routing: Specifies a subset of projects to target for the field-caps
+            query using project metadata tags in a subset of Lucene query syntax. Allowed
+            Lucene queries: the _alias tag and a single value (possibly wildcarded).
+            Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless
+            only.
         :param runtime_mappings: Define ad-hoc runtime fields in the request similar
             to the way it is done in search requests. These fields exist only as part
             of the query and take precedence over fields defined with the same name in
@@ -2276,6 +2224,8 @@ class Elasticsearch(BaseClient):
             __query["include_unmapped"] = include_unmapped
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if types is not None:
             __query["types"] = types
         if not __body:
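
The field capabilities API gains the same `project_routing` parameter as count. A minimal sketch of a field-caps call that also uses the `include_unmapped` and `index_filter` options shown above; the index, field patterns, and local cluster URL are hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.field_caps(
        index="my-index",                   # hypothetical index
        fields=["*.keyword", "@timestamp"],
        include_unmapped=True,
        index_filter={"range": {"@timestamp": {"gte": "now-7d"}}},
    )
    print(sorted(resp["fields"].keys()))
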
@@ -2303,6 +2253,7 @@ class Elasticsearch(BaseClient):
     @_rewrite_parameters(
         parameter_aliases={
             "_source": "source",
+            "_source_exclude_vectors": "source_exclude_vectors",
             "_source_excludes": "source_excludes",
             "_source_includes": "source_includes",
         },
@@ -2322,12 +2273,13 @@ class Elasticsearch(BaseClient):
         refresh: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
+        source_exclude_vectors: t.Optional[bool] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2371,12 +2323,12 @@ class Elasticsearch(BaseClient):
           Elasticsearch cleans up deleted documents in the background as you continue to index more data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: The name of the index that contains the document.
         :param id: A unique document identifier.
         :param force_synthetic_source: Indicates whether the request forces synthetic
-            `_source`. Use this paramater to test if the mapping supports synthetic `_source`
+            `_source`. Use this parameter to test if the mapping supports synthetic `_source`
             and to get a sense of the worst case performance. Fetches with this parameter
             enabled will be slower than enabling synthetic source natively in the index.
         :param preference: The node or shard the operation should be performed on. By
@@ -2395,6 +2347,7 @@ class Elasticsearch(BaseClient):
         :param routing: A custom value used to route operations to a specific shard.
         :param source: Indicates whether to return the `_source` field (`true` or `false`)
             or lists the fields to return.
+        :param source_exclude_vectors: Whether vectors should be excluded from _source
         :param source_excludes: A comma-separated list of source fields to exclude from
             the response. You can also use this parameter to exclude fields from the
             subset specified in `_source_includes` query parameter. If the `_source`
@@ -2407,8 +2360,8 @@ class Elasticsearch(BaseClient):
         :param stored_fields: A comma-separated list of stored fields to return as part
             of a hit. If no fields are specified, no stored fields are included in the
             response. If this field is specified, the `_source` parameter defaults to
-            `false`. Only leaf fields can be retrieved with the `stored_field` option.
-            Object fields can't be returned;​if specified, the request fails.
+            `false`. Only leaf fields can be retrieved with the `stored_fields` option.
+            Object fields can't be returned; if specified, the request fails.
         :param version: The version number for concurrency control. It must match the
             current version of the document for the request to succeed.
         :param version_type: The version type.
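
The new `source_exclude_vectors` flag (sent as `_source_exclude_vectors`) composes with the existing source filtering options. A minimal sketch, with a hypothetical index, document ID, and field list:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    doc = client.get(
        index="my-index",                  # hypothetical index
        id="1",
        source_exclude_vectors=True,       # drop dense_vector values from the returned _source
        source_includes=["title", "body"],
    )
    print(doc["_source"])
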
@@ -2440,6 +2393,8 @@ class Elasticsearch(BaseClient):
             __query["routing"] = routing
         if source is not None:
             __query["_source"] = source
+        if source_exclude_vectors is not None:
+            __query["_source_exclude_vectors"] = source_exclude_vectors
         if source_excludes is not None:
             __query["_source_excludes"] = source_excludes
         if source_includes is not None:
@@ -2478,7 +2433,7 @@ class Elasticsearch(BaseClient):
           Retrieves a stored script or search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-scripting.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script>`_
 
         :param id: The identifier for the stored script or search template.
         :param master_timeout: The period to wait for the master node. If the master
@@ -2527,7 +2482,7 @@ class Elasticsearch(BaseClient):
           <p>Get a list of supported script contexts and their methods.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-script-contexts-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_script_context"
@@ -2566,7 +2521,7 @@ class Elasticsearch(BaseClient):
           <p>Get a list of available script types, languages, and contexts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-script-languages-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_script_language"
@@ -2612,10 +2567,9 @@ class Elasticsearch(BaseClient):
         source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2631,7 +2585,7 @@ class Elasticsearch(BaseClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get>`_
 
         :param index: The name of the index that contains the document.
         :param id: A unique document identifier.
@@ -2649,8 +2603,6 @@ class Elasticsearch(BaseClient):
             the response.
         :param source_includes: A comma-separated list of source fields to include in
             the response.
-        :param stored_fields: A comma-separated list of stored fields to return as part
-            of a hit.
         :param version: The version number for concurrency control. It must match the
             current version of the document for the request to succeed.
         :param version_type: The version type.
@@ -2684,8 +2636,6 @@ class Elasticsearch(BaseClient):
             __query["_source_excludes"] = source_excludes
         if source_includes is not None:
             __query["_source_includes"] = source_includes
-        if stored_fields is not None:
-            __query["stored_fields"] = stored_fields
         if version is not None:
             __query["version"] = version
         if version_type is not None:
@@ -2731,7 +2681,7 @@ class Elasticsearch(BaseClient):
           When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/health-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report>`_
 
         :param feature: A feature of the cluster, as returned by the top-level health
             report API.
@@ -2786,6 +2736,7 @@ class Elasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         if_primary_term: t.Optional[int] = None,
         if_seq_no: t.Optional[int] = None,
+        include_source_on_error: t.Optional[bool] = None,
         op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None,
         pipeline: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
@@ -2793,11 +2744,12 @@ class Elasticsearch(BaseClient):
             t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
         ] = None,
         require_alias: t.Optional[bool] = None,
+        require_data_stream: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
@@ -2842,9 +2794,7 @@ class Elasticsearch(BaseClient):
           This does come at the (very minimal) cost of an additional document parsing pass.
           If the <code>_routing</code> mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.</p>
           <p>NOTE: Data streams do not support custom routing unless they were created with the <code>allow_custom_routing</code> setting enabled in the template.</p>
-          <ul>
-          <li>** Distributed**</li>
-          </ul>
+          <p><strong>Distributed</strong></p>
           <p>The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
           After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.</p>
           <p><strong>Active shards</strong></p>
@@ -2897,7 +2847,7 @@ class Elasticsearch(BaseClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-index_.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create>`_
 
         :param index: The name of the data stream or index to target. If the target doesn't
             exist and matches the name or wildcard (`*`) pattern of an index template
@@ -2913,6 +2863,8 @@ class Elasticsearch(BaseClient):
             term.
         :param if_seq_no: Only perform the operation if the document has this sequence
             number.
+        :param include_source_on_error: Whether to include the document source
+            in the error message in case of parsing errors.
         :param op_type: Set to `create` to only index the document if it does not already
             exist (put if absent). If a document with the specified `_id` already exists,
             the indexing operation will fail. The behavior is the same as using the `<index>/_create`
@@ -2928,6 +2880,8 @@ class Elasticsearch(BaseClient):
             this operation visible to search. If `wait_for`, it waits for a refresh to
             make this operation visible to search. If `false`, it does nothing with refreshes.
         :param require_alias: If `true`, the destination must be an index alias.
+        :param require_data_stream: If `true`, the request's actions must target a data
+            stream (existing or to be created).
         :param routing: A custom value that is used to route operations to a specific
             shard.
         :param timeout: The period the request waits for the following operations: automatic
@@ -2977,6 +2931,8 @@ class Elasticsearch(BaseClient):
             __query["if_primary_term"] = if_primary_term
         if if_seq_no is not None:
             __query["if_seq_no"] = if_seq_no
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if op_type is not None:
             __query["op_type"] = op_type
         if pipeline is not None:
@@ -2987,6 +2943,8 @@ class Elasticsearch(BaseClient):
             __query["refresh"] = refresh
         if require_alias is not None:
             __query["require_alias"] = require_alias
+        if require_data_stream is not None:
+            __query["require_data_stream"] = require_data_stream
         if routing is not None:
             __query["routing"] = routing
         if timeout is not None:
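
This endpoint (apparently the client's `index` method, given the `op_type` parameter above) receives the same `include_source_on_error` and `require_data_stream` flags. A minimal sketch that combines one of them with the pre-existing optimistic-concurrency parameters visible in this hunk; the index, document, sequence number, and primary term are hypothetical values taken from an earlier response:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Overwrite a document only if it has not changed since we last read it.
    resp = client.index(
        index="my-index",                 # hypothetical index
        id="1",
        document={"title": "hello", "revision": 2},
        if_seq_no=5,                      # from the previous get/index response
        if_primary_term=1,                # optimistic concurrency control
        include_source_on_error=False,    # omit the source from parsing-error messages
    )
    print(resp["_version"])
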
@@ -3022,10 +2980,11 @@ class Elasticsearch(BaseClient):
         .. raw:: html
 
           <p>Get cluster info.
-          Get basic build, version, and cluster information.</p>
+          Get basic build, version, and cluster information.
+          ::: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rest-api-root.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/"
@@ -3049,127 +3008,6 @@ class Elasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=(
-            "knn",
-            "docvalue_fields",
-            "fields",
-            "filter",
-            "source",
-            "stored_fields",
-        ),
-        parameter_aliases={"_source": "source"},
-    )
-    @_stability_warning(Stability.EXPERIMENTAL)
-    def knn_search(
-        self,
-        *,
-        index: t.Union[str, t.Sequence[str]],
-        knn: t.Optional[t.Mapping[str, t.Any]] = None,
-        docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None,
-        error_trace: t.Optional[bool] = None,
-        fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        filter: t.Optional[
-            t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]]
-        ] = None,
-        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        human: t.Optional[bool] = None,
-        pretty: t.Optional[bool] = None,
-        routing: t.Optional[str] = None,
-        source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None,
-        stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        body: t.Optional[t.Dict[str, t.Any]] = None,
-    ) -> ObjectApiResponse[t.Any]:
-        """
-        .. raw:: html
-
-          <p>Run a knn search.</p>
-          <p>NOTE: The kNN search API has been replaced by the <code>knn</code> option in the search API.</p>
-          <p>Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents.
-          Given a query vector, the API finds the k closest vectors and returns those documents as search hits.</p>
-          <p>Elasticsearch uses the HNSW algorithm to support efficient kNN search.
-          Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed.
-          This means the results returned are not always the true k closest neighbors.</p>
-          <p>The kNN search API supports restricting the search using a filter.
-          The search will return the top k documents that also match the filter query.</p>
-          <p>A kNN search response has the exact same structure as a search API response.
-          However, certain sections have a meaning specific to kNN search:</p>
-          <ul>
-          <li>The document <code>_score</code> is determined by the similarity between the query and document vector.</li>
-          <li>The <code>hits.total</code> object contains the total number of nearest neighbor candidates considered, which is <code>num_candidates * num_shards</code>. The <code>hits.total.relation</code> will always be <code>eq</code>, indicating an exact value.</li>
-          </ul>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/knn-search-api.html>`_
-
-        :param index: A comma-separated list of index names to search; use `_all` or
-            to perform the operation on all indices.
-        :param knn: The kNN query to run.
-        :param docvalue_fields: The request returns doc values for field names matching
-            these patterns in the `hits.fields` property of the response. It accepts
-            wildcard (`*`) patterns.
-        :param fields: The request returns values for field names matching these patterns
-            in the `hits.fields` property of the response. It accepts wildcard (`*`)
-            patterns.
-        :param filter: A query to filter the documents that can match. The kNN search
-            will return the top `k` documents that also match this filter. The value
-            can be a single query or a list of queries. If `filter` isn't provided, all
-            documents are allowed to match.
-        :param routing: A comma-separated list of specific routing values.
-        :param source: Indicates which source fields are returned for matching documents.
-            These fields are returned in the `hits._source` property of the search response.
-        :param stored_fields: A list of stored fields to return as part of a hit. If
-            no fields are specified, no stored fields are included in the response. If
-            this field is specified, the `_source` parameter defaults to `false`. You
-            can pass `_source: true` to return both source fields and stored fields in
-            the search response.
-        """
-        if index in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'index'")
-        if knn is None and body is None:
-            raise ValueError("Empty value passed for parameter 'knn'")
-        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
-        __path = f'/{__path_parts["index"]}/_knn_search'
-        __query: t.Dict[str, t.Any] = {}
-        __body: t.Dict[str, t.Any] = body if body is not None else {}
-        if error_trace is not None:
-            __query["error_trace"] = error_trace
-        if filter_path is not None:
-            __query["filter_path"] = filter_path
-        if human is not None:
-            __query["human"] = human
-        if pretty is not None:
-            __query["pretty"] = pretty
-        if routing is not None:
-            __query["routing"] = routing
-        if not __body:
-            if knn is not None:
-                __body["knn"] = knn
-            if docvalue_fields is not None:
-                __body["docvalue_fields"] = docvalue_fields
-            if fields is not None:
-                __body["fields"] = fields
-            if filter is not None:
-                __body["filter"] = filter
-            if source is not None:
-                __body["_source"] = source
-            if stored_fields is not None:
-                __body["stored_fields"] = stored_fields
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
-        return self.perform_request(  # type: ignore[return-value]
-            "POST",
-            __path,
-            params=__query,
-            headers=__headers,
-            body=__body,
-            endpoint_id="knn_search",
-            path_parts=__path_parts,
-        )
-
-    @_rewrite_parameters(
         body_fields=("docs", "ids"),
         parameter_aliases={
             "_source": "source",
@@ -3215,7 +3053,7 @@ class Elasticsearch(BaseClient):
           You can include the <code>stored_fields</code> query parameter in the request URI to specify the defaults to use when there are no per-document instructions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-multi-get.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget>`_
 
         :param index: Name of the index to retrieve documents from when `ids` are specified,
             or when a document in the `docs` array does not specify an index.
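
Given the `docs`/`ids` body fields in the decorator above, a multi-get request can pass either per-document instructions or a plain list of IDs. A minimal sketch with a hypothetical index and IDs:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.mget(index="my-index", ids=["1", "2", "3"])
    for doc in resp["docs"]:
        if doc.get("found"):
            print(doc["_id"], doc["_source"])
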
@@ -3326,6 +3164,7 @@ class Elasticsearch(BaseClient):
         max_concurrent_shard_requests: t.Optional[int] = None,
         pre_filter_shard_size: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         rest_total_hits_as_int: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         search_type: t.Optional[
@@ -3350,7 +3189,7 @@ class Elasticsearch(BaseClient):
           When sending requests to this endpoint the <code>Content-Type</code> header should be set to <code>application/x-ndjson</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-multi-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch>`_
 
         :param searches:
         :param index: Comma-separated list of data streams, indices, and index aliases
@@ -3377,7 +3216,8 @@ class Elasticsearch(BaseClient):
             computationally expensive named queries on a large number of hits may add
             significant overhead.
         :param max_concurrent_searches: Maximum number of concurrent searches the multi
-            search API can execute.
+            search API can execute. Defaults to `max(1, (# of data nodes * min(search
+            thread pool size, 10)))`.
         :param max_concurrent_shard_requests: Maximum number of concurrent shard requests
             that each sub-search request executes per node.
         :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter
@@ -3386,6 +3226,10 @@ class Elasticsearch(BaseClient):
             roundtrip can limit the number of shards significantly if for instance a
             shard can not match any documents based on its rewrite method i.e., if date
             filters are mandatory to match but the shard bounds and the query are disjoint.
+        :param project_routing: Specifies a subset of projects to target for a search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only.
         :param rest_total_hits_as_int: If true, hits.total are returned as an integer
             in the response. Defaults to false, which returns an object.
         :param routing: Custom routing value used to route search operations to a specific
@@ -3435,6 +3279,8 @@ class Elasticsearch(BaseClient):
             __query["pre_filter_shard_size"] = pre_filter_shard_size
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if rest_total_hits_as_int is not None:
             __query["rest_total_hits_as_int"] = rest_total_hits_as_int
         if routing is not None:
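
The multi search API now also forwards `project_routing`; its `searches` argument is a flat list of alternating header and body objects that the client serializes as NDJSON. A minimal sketch with hypothetical indices and queries (`project_routing` is omitted here because it is documented as serverless-only):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.msearch(
        index="my-index",                              # default target for headers that omit one
        searches=[
            {},                                        # header: use the default index
            {"query": {"match": {"message": "hello"}}},
            {"index": "other-index"},                  # header: override the target
            {"query": {"match_all": {}}, "size": 5},
        ],
    )
    for item in resp["responses"]:
        print(item["hits"]["total"])
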
@@ -3473,6 +3319,7 @@ class Elasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         max_concurrent_searches: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         rest_total_hits_as_int: t.Optional[bool] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -3496,7 +3343,7 @@ class Elasticsearch(BaseClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/multi-search-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template>`_
 
         :param search_templates:
         :param index: A comma-separated list of data streams, indices, and aliases to
@@ -3506,6 +3353,10 @@ class Elasticsearch(BaseClient):
             for cross-cluster search requests.
         :param max_concurrent_searches: The maximum number of concurrent searches the
             API can run.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only.
         :param rest_total_hits_as_int: If `true`, the response returns `hits.total` as
             an integer. If `false`, it returns `hits.total` as an object.
         :param search_type: The type of the search operation.
@@ -3538,6 +3389,8 @@ class Elasticsearch(BaseClient):
             __query["max_concurrent_searches"] = max_concurrent_searches
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if rest_total_hits_as_int is not None:
             __query["rest_total_hits_as_int"] = rest_total_hits_as_int
         if search_type is not None:
@@ -3583,7 +3436,7 @@ class Elasticsearch(BaseClient):
         term_statistics: t.Optional[bool] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -3601,7 +3454,7 @@ class Elasticsearch(BaseClient):
           The mapping used is determined by the specified <code>_index</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-multi-termvectors.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors>`_
 
         :param index: The name of the index that contains the documents.
         :param docs: An array of existing or artificial documents.
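
A minimal sketch of a multi term vectors request built from the `docs` array described above, together with the `term_statistics` flag from the signature; the index, IDs, and field names are hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.mtermvectors(
        index="my-index",                        # hypothetical index
        docs=[
            {"_id": "1", "fields": ["message"]},
            {"_id": "2", "fields": ["message"]},
        ],
        term_statistics=True,
    )
    for doc in resp["docs"]:
        print(doc["_id"], list(doc.get("term_vectors", {}).keys()))
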
@@ -3705,8 +3558,10 @@ class Elasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         index_filter: t.Optional[t.Mapping[str, t.Any]] = None,
+        max_concurrent_shard_requests: t.Optional[int] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         routing: t.Optional[str] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -3741,7 +3596,7 @@ class Elasticsearch(BaseClient):
           You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/point-in-time-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time>`_
 
         :param index: A comma-separated list of index names to open point in time; use
             `_all` or empty string to perform the operation on all indices
@@ -3754,14 +3609,20 @@ class Elasticsearch(BaseClient):
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
-            values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`,
-            `hidden`, `none`.
+            values, such as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param index_filter: Filter indices if the provided query rewrites to `match_none`
             on every shard.
+        :param max_concurrent_shard_requests: Maximum number of concurrent shard requests
+            that each sub-search request executes per node.
         :param preference: The node or shard the operation should be performed on. By
             default, it is random.
+        :param project_routing: Specifies a subset of projects to target for the PIT
+            request using project metadata tags in a subset of Lucene query syntax. Allowed
+            Lucene queries: the _alias tag and a single value (possibly wildcarded).
+            Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless
+            only.
         :param routing: A custom value that is used to route operations to a specific
             shard.
         """
@@ -3787,10 +3648,14 @@ class Elasticsearch(BaseClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
+        if max_concurrent_shard_requests is not None:
+            __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if routing is not None:
             __query["routing"] = routing
         if not __body:
@@ -3835,7 +3700,7 @@ class Elasticsearch(BaseClient):
           Creates or updates a stored script or search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-stored-script-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script>`_
 
         :param id: The identifier for the stored script or search template. It must be
             unique within the cluster.
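
A minimal sketch of storing a search template through this endpoint; the template ID and source are hypothetical, and `script` is the stored-script body object:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.put_script(
        id="my-search-template",             # hypothetical stored template ID
        script={
            "lang": "mustache",
            "source": {"query": {"match": {"message": "{{query_string}}"}}},
        },
    )
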
@@ -3925,7 +3790,7 @@ class Elasticsearch(BaseClient):
           <p>Evaluate the quality of ranked search results over a set of typical search queries.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-rank-eval.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval>`_
 
         :param requests: A set of typical search requests, together with their provided
             ratings.
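
A minimal sketch of a ranking evaluation call with a single rated query; the index, query, ratings, and the precision-at-k metric shown are all assumed for illustration:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.rank_eval(
        index="my-index",                    # hypothetical index
        requests=[
            {
                "id": "hello_query",
                "request": {"query": {"match": {"title": "hello"}}},
                "ratings": [{"_index": "my-index", "_id": "1", "rating": 1}],
            }
        ],
        metric={"precision": {"k": 10}},
    )
    print(resp["metric_score"])
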
@@ -3989,7 +3854,7 @@ class Elasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("dest", "source", "conflicts", "max_docs", "script", "size"),
+        body_fields=("dest", "source", "conflicts", "max_docs", "script"),
     )
     def reindex(
         self,
@@ -4007,7 +3872,6 @@ class Elasticsearch(BaseClient):
         require_alias: t.Optional[bool] = None,
         script: t.Optional[t.Mapping[str, t.Any]] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        size: t.Optional[int] = None,
         slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         wait_for_active_shards: t.Optional[
@@ -4051,113 +3915,17 @@ class Elasticsearch(BaseClient):
           In this case, the response includes a count of the version conflicts that were encountered.
           Note that the handling of other error types is unaffected by the <code>conflicts</code> property.
           Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than <code>max_docs</code> until it has successfully indexed <code>max_docs</code> documents into the target or it has gone through every document in the source query.</p>
-          <p>NOTE: The reindex API makes no effort to handle ID collisions.
-          The last document written will &quot;win&quot; but the order isn't usually predictable so it is not a good idea to rely on this behavior.
-          Instead, make sure that IDs are unique by using a script.</p>
-          <p><strong>Running reindex asynchronously</strong></p>
-          <p>If the request contains <code>wait_for_completion=false</code>, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.
-          Elasticsearch creates a record of this task as a document at <code>_tasks/&lt;task_id&gt;</code>.</p>
-          <p><strong>Reindex from multiple sources</strong></p>
-          <p>If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.
-          That way you can resume the process if there are any errors by removing the partially completed source and starting over.
-          It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.</p>
-          <p>For example, you can use a bash script like this:</p>
-          <pre><code>for index in i1 i2 i3 i4 i5; do
-            curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
-              &quot;source&quot;: {
-                &quot;index&quot;: &quot;'$index'&quot;
-              },
-              &quot;dest&quot;: {
-                &quot;index&quot;: &quot;'$index'-reindexed&quot;
-              }
-            }'
-          done
-          </code></pre>
-          <p>** Throttling**</p>
-          <p>Set <code>requests_per_second</code> to any positive decimal number (<code>1.4</code>, <code>6</code>, <code>1000</code>, for example) to throttle the rate at which reindex issues batches of index operations.
-          Requests are throttled by padding each batch with a wait time.
-          To turn off throttling, set <code>requests_per_second</code> to <code>-1</code>.</p>
-          <p>The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.
-          The padding time is the difference between the batch size divided by the <code>requests_per_second</code> and the time spent writing.
-          By default the batch size is <code>1000</code>, so if <code>requests_per_second</code> is set to <code>500</code>:</p>
-          <pre><code>target_time = 1000 / 500 per second = 2 seconds
-          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
-          </code></pre>
-          <p>Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.
-          This is &quot;bursty&quot; instead of &quot;smooth&quot;.</p>
-          <p><strong>Slicing</strong></p>
-          <p>Reindex supports sliced scroll to parallelize the reindexing process.
-          This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.</p>
-          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
-          <p>You can slice a reindex request manually by providing a slice ID and total number of slices to each request.
-          You can also let reindex automatically parallelize by using sliced scroll to slice on <code>_id</code>.
-          The <code>slices</code> parameter specifies the number of slices to use.</p>
-          <p>Adding <code>slices</code> to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:</p>
-          <ul>
-          <li>You can see these requests in the tasks API. These sub-requests are &quot;child&quot; tasks of the task for the request with slices.</li>
-          <li>Fetching the status of the task for the request with <code>slices</code> only contains the status of completed slices.</li>
-          <li>These sub-requests are individually addressable for things like cancellation and rethrottling.</li>
-          <li>Rethrottling the request with <code>slices</code> will rethrottle the unfinished sub-request proportionally.</li>
-          <li>Canceling the request with <code>slices</code> will cancel each sub-request.</li>
-          <li>Due to the nature of <code>slices</code>, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.</li>
-          <li>Parameters like <code>requests_per_second</code> and <code>max_docs</code> on a request with <code>slices</code> are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using <code>max_docs</code> with <code>slices</code> might not result in exactly <code>max_docs</code> documents being reindexed.</li>
-          <li>Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.</li>
-          </ul>
-          <p>If slicing automatically, setting <code>slices</code> to <code>auto</code> will choose a reasonable number for most indices.
-          If slicing manually or otherwise tuning automatic slicing, use the following guidelines.</p>
-          <p>Query performance is most efficient when the number of slices is equal to the number of shards in the index.
-          If that number is large (for example, <code>500</code>), choose a lower number as too many slices will hurt performance.
-          Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</p>
-          <p>Indexing performance scales linearly across available resources with the number of slices.</p>
-          <p>Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
-          <p><strong>Modify documents during reindexing</strong></p>
-          <p>Like <code>_update_by_query</code>, reindex operations support a script that modifies the document.
-          Unlike <code>_update_by_query</code>, the script is allowed to modify the document's metadata.</p>
-          <p>Just as in <code>_update_by_query</code>, you can set <code>ctx.op</code> to change the operation that is run on the destination.
-          For example, set <code>ctx.op</code> to <code>noop</code> if your script decides that the document doesn’t have to be indexed in the destination. This &quot;no operation&quot; will be reported in the <code>noop</code> counter in the response body.
-          Set <code>ctx.op</code> to <code>delete</code> if your script decides that the document must be deleted from the destination.
-          The deletion will be reported in the <code>deleted</code> counter in the response body.
-          Setting <code>ctx.op</code> to anything else will return an error, as will setting any other field in <code>ctx</code>.</p>
-          <p>Think of the possibilities! Just be careful; you are able to change:</p>
+          <p>It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.</p>
           <ul>
-          <li><code>_id</code></li>
-          <li><code>_index</code></li>
-          <li><code>_version</code></li>
-          <li><code>_routing</code></li>
+          <li>When requested with <code>wait_for_completion=true</code> (default), the request fails if the node shuts down.</li>
+          <li>When requested with <code>wait_for_completion=false</code>, a task id is returned, for use with the task management APIs. The task may disappear or fail if the node shuts down.
+          When retrying a failed reindex operation, it might be necessary to set <code>conflicts=proceed</code> or to first delete the partial destination index.
+          Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.</li>
           </ul>
-          <p>Setting <code>_version</code> to <code>null</code> or clearing it from the <code>ctx</code> map is just like not sending the version in an indexing request.
-          It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.</p>
-          <p><strong>Reindex from remote</strong></p>
-          <p>Reindex supports reindexing from a remote Elasticsearch cluster.
-          The <code>host</code> parameter must contain a scheme, host, port, and optional path.
-          The <code>username</code> and <code>password</code> parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.
-          Be sure to use HTTPS when using basic authentication or the password will be sent in plain text.
-          There are a range of settings available to configure the behavior of the HTTPS connection.</p>
-          <p>When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
-          Remote hosts must be explicitly allowed with the <code>reindex.remote.whitelist</code> setting.
-          It can be set to a comma delimited list of allowed remote host and port combinations.
-          Scheme is ignored; only the host and port are used.
-          For example:</p>
-          <pre><code>reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*&quot;]
-          </code></pre>
-          <p>The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
-          This feature should work with remote clusters of any version of Elasticsearch.
-          This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.</p>
-          <p>WARNING: Elasticsearch does not support forward compatibility across major versions.
-          For example, you cannot reindex from a 7.x cluster into a 6.x cluster.</p>
-          <p>To enable queries sent to older versions of Elasticsearch, the <code>query</code> parameter is sent directly to the remote host without validation or modification.</p>
-          <p>NOTE: Reindexing from remote clusters does not support manual or automatic slicing.</p>
-          <p>Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
-          If the remote index includes very large documents you'll need to use a smaller batch size.
-          It is also possible to set the socket read timeout on the remote connection with the <code>socket_timeout</code> field and the connection timeout with the <code>connect_timeout</code> field.
-          Both default to 30 seconds.</p>
-          <p><strong>Configuring SSL parameters</strong></p>
-          <p>Reindex from remote supports configurable SSL settings.
-          These must be specified in the <code>elasticsearch.yml</code> file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
-          It is not possible to configure SSL in the body of the reindex request.</p>
+          <p>Refer to the linked documentation for examples of how to reindex documents.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-reindex.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex>`_
 
         :param dest: The destination you are copying to.
         :param source: The source you are copying from.
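A rough illustration (not part of the diff): a minimal Python sketch of an asynchronous reindex using the resilience options described in the docstring above (`conflicts=proceed`, `wait_for_completion=false`). The index names are hypothetical and a local, unsecured cluster is assumed.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

# Launch the reindex in the background and tolerate version conflicts,
# which makes it easier to retry after a node failure.
resp = client.reindex(
    source={"index": "my-source-index"},   # hypothetical source index
    dest={"index": "my-dest-index"},       # hypothetical destination index
    conflicts="proceed",
    wait_for_completion=False,
)
task_id = resp["task"]

# The returned task id can be polled or cancelled through the task management APIs.
status = client.tasks.get(task_id=task_id)
```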
@@ -4179,7 +3947,6 @@ class Elasticsearch(BaseClient):
             reindexing.
         :param scroll: The period of time that a consistent view of the index should
             be maintained for scrolled search.
-        :param size:
         :param slices: The number of slices this task should be divided into. It defaults
             to one slice, which means the task isn't sliced into subtasks. Reindex supports
             sliced scroll to parallelize the reindexing process. This parallelization
@@ -4244,8 +4011,6 @@ class Elasticsearch(BaseClient):
                 __body["max_docs"] = max_docs
             if script is not None:
                 __body["script"] = script
-            if size is not None:
-                __body["size"] = size
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -4281,7 +4046,7 @@ class Elasticsearch(BaseClient):
           This behavior prevents scroll timeouts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-reindex.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex>`_
 
         :param task_id: The task identifier, which can be found by using the tasks API.
         :param requests_per_second: The throttle for this request in sub-requests per
@@ -4327,7 +4092,7 @@ class Elasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         params: t.Optional[t.Mapping[str, t.Any]] = None,
         pretty: t.Optional[bool] = None,
-        source: t.Optional[str] = None,
+        source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -4337,7 +4102,7 @@ class Elasticsearch(BaseClient):
           <p>Render a search template as a search request body.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/render-search-template-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template>`_
 
         :param id: The ID of the search template to render. If no `source` is specified,
             this or the `id` request body parameter is required.
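A minimal sketch (not part of the diff) of rendering a stored template and, alternatively, an inline `source` mapping (the form this change newly allows); the template id, field names, and parameters are hypothetical.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

# Render a stored search template (id and params are hypothetical).
rendered = client.render_search_template(
    id="my-search-template",
    params={"query_string": "hello world"},
)

# Render an inline template instead of a stored one.
rendered_inline = client.render_search_template(
    source={"query": {"match": {"message": "{{query_string}}"}}},
    params={"query_string": "hello world"},
)
print(rendered["template_output"])
```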
@@ -4372,11 +4137,7 @@ class Elasticsearch(BaseClient):
                 __body["params"] = params
             if source is not None:
                 __body["source"] = source
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -4394,7 +4155,24 @@ class Elasticsearch(BaseClient):
     def scripts_painless_execute(
         self,
         *,
-        context: t.Optional[str] = None,
+        context: t.Optional[
+            t.Union[
+                str,
+                t.Literal[
+                    "boolean_field",
+                    "composite_field",
+                    "date_field",
+                    "double_field",
+                    "filter",
+                    "geo_point_field",
+                    "ip_field",
+                    "keyword_field",
+                    "long_field",
+                    "painless_test",
+                    "score",
+                ],
+            ]
+        ] = None,
         context_setup: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -4406,15 +4184,22 @@ class Elasticsearch(BaseClient):
         """
         .. raw:: html
 
-          <p>Run a script.
-          Runs a script and returns a result.</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/painless/8.17/painless-execute-api.html>`_
-
-        :param context: The context that the script should run in.
-        :param context_setup: Additional parameters for the `context`.
-        :param script: The Painless script to execute.
+          <p>Run a script.</p>
+          <p>Runs a script and returns a result.
+          Use this API to build and test scripts, such as when defining a script for a runtime field.
+          This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.</p>
+          <p>The API uses several <em>contexts</em>, which control how scripts are run, what variables are available at runtime, and what the return type is.</p>
+          <p>Each context requires a script, but additional parameters depend on the context you're using for that script.</p>
+
+
+        `<https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples>`_
+
+        :param context: The context that the script should run in. NOTE: Result ordering
+            in the field contexts is not guaranteed.
+        :param context_setup: Additional parameters for the `context`. NOTE: This parameter
+            is required for all contexts except `painless_test`, which is the default
+            if no value is provided for `context`.
+        :param script: The Painless script to run.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_scripts/painless/_execute"
@@ -4435,11 +4220,7 @@ class Elasticsearch(BaseClient):
                 __body["context_setup"] = context_setup
             if script is not None:
                 __body["script"] = script
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -4480,7 +4261,7 @@ class Elasticsearch(BaseClient):
           <p>IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/scroll-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll>`_
 
         :param scroll_id: The scroll ID of the search.
         :param rest_total_hits_as_int: If true, the API response’s hit.total property
@@ -4564,6 +4345,7 @@ class Elasticsearch(BaseClient):
         ),
         parameter_aliases={
             "_source": "source",
+            "_source_exclude_vectors": "source_exclude_vectors",
             "_source_excludes": "source_excludes",
             "_source_includes": "source_includes",
             "from": "from_",
@@ -4611,7 +4393,6 @@ class Elasticsearch(BaseClient):
         ] = None,
         lenient: t.Optional[bool] = None,
         max_concurrent_shard_requests: t.Optional[int] = None,
-        min_compatible_shard_node: t.Optional[str] = None,
         min_score: t.Optional[float] = None,
         pit: t.Optional[t.Mapping[str, t.Any]] = None,
         post_filter: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -4619,6 +4400,7 @@ class Elasticsearch(BaseClient):
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         q: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         rank: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -4633,7 +4415,7 @@ class Elasticsearch(BaseClient):
         script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -4648,6 +4430,7 @@ class Elasticsearch(BaseClient):
             ]
         ] = None,
         source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None,
+        source_exclude_vectors: t.Optional[bool] = None,
         source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         stats: t.Optional[t.Sequence[str]] = None,
@@ -4686,7 +4469,7 @@ class Elasticsearch(BaseClient):
           This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams and indices,
@@ -4715,8 +4498,8 @@ class Elasticsearch(BaseClient):
             node and the remote clusters are minimized when running cross-cluster search
             (CCS) requests.
         :param collapse: Collapses search results the values of the specified field.
-        :param default_operator: The default operator for the query string query: `AND`
-            or `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for the query string query: `and`
+            or `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as a default when no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -4764,10 +4547,9 @@ class Elasticsearch(BaseClient):
             per node that the search runs concurrently. This value should be used to
             limit the impact of the search on the cluster in order to limit the number
             of concurrent shard requests.
-        :param min_compatible_shard_node: The minimum version of the node that can handle
-            the request. Any handling node with a lower version will fail the request.
         :param min_score: The minimum `_score` for matching documents. Documents with
-            a lower `_score` are not included in the search results.
+            a lower `_score` are not included in the search results or in results collected
+            by aggregations.
         :param pit: Limit the search to a point in time (PIT). If you provide a PIT,
             you cannot specify an `<index>` in the request path.
         :param post_filter: Use the `post_filter` parameter to filter search results.
@@ -4786,21 +4568,26 @@ class Elasticsearch(BaseClient):
         :param preference: The nodes and shards used for the search. By default, Elasticsearch
             selects from eligible nodes and shards using adaptive replica selection,
             accounting for allocation awareness. Valid values are: * `_only_local` to
-            run the search only on shards on the local node; * `_local` to, if possible,
+            run the search only on shards on the local node. * `_local` to, if possible,
             run the search on shards on the local node, or if not, select shards using
-            the default method; * `_only_nodes:<node-id>,<node-id>` to run the search
-            on only the specified nodes IDs, where, if suitable shards exist on more
-            than one selected node, use shards on those nodes using the default method,
-            or if none of the specified nodes are available, select shards from any available
-            node using the default method; * `_prefer_nodes:<node-id>,<node-id>` to if
-            possible, run the search on the specified nodes IDs, or if not, select shards
-            using the default method; * `_shards:<shard>,<shard>` to run the search only
-            on the specified shards; * `<custom-string>` (any string that does not start
-            with `_`) to route searches with the same `<custom-string>` to the same shards
-            in the same order.
+            the default method. * `_only_nodes:<node-id>,<node-id>` to run the search
+            on only the specified node IDs. If suitable shards exist on more than one
+            selected node, use shards on those nodes using the default method. If none
+            of the specified nodes are available, select shards from any available node
+            using the default method. * `_prefer_nodes:<node-id>,<node-id>` to run the
+            search on the specified node IDs if possible. If not, select shards using the
+            default method. * `_shards:<shard>,<shard>` to run the search only on the
+            specified shards. You can combine this value with other `preference` values.
+            However, the `_shards` value must come first. For example: `_shards:2,3|_local`.
+            * `<custom-string>` (any string that does not start with `_`) to route searches
+            with the same `<custom-string>` to the same shards in the same order.
         :param profile: Set to `true` to return detailed timing information about the
             execution of individual components in a search request. NOTE: This is a debugging
             tool and adds significant overhead to search execution.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param q: A query in the Lucene query string syntax. Query parameter searches
             do not support the full Elasticsearch Query DSL but are handy for testing.
             IMPORTANT: This parameter overrides the query parameter in the request body.
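A minimal sketch (not part of the diff) of a search request combining a Query DSL body with the `preference` option and the new `_source_exclude_vectors` parameter added in this change; the index and field names are hypothetical.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

resp = client.search(
    index="my-index",                      # hypothetical index
    query={"match": {"title": "python"}},  # hypothetical field and value
    preference="_local",                   # prefer shards on the local node when possible
    source_exclude_vectors=True,           # sent as the `_source_exclude_vectors` query parameter
    size=10,
)
for hit in resp["hits"]["hits"]:
    print(hit["_id"], hit["_score"])
```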
@@ -4842,6 +4629,7 @@ class Elasticsearch(BaseClient):
             fields are returned in the `hits._source` property of the search response.
             If the `stored_fields` property is specified, the `_source` property defaults
             to `false`. Otherwise, it defaults to `true`.
+        :param source_exclude_vectors: Whether vectors should be excluded from _source
         :param source_excludes: A comma-separated list of source fields to exclude from
             the response. You can also use this parameter to exclude fields from the
             subset specified in `_source_includes` query parameter. If the `_source`
@@ -4948,14 +4736,14 @@ class Elasticsearch(BaseClient):
             __query["lenient"] = lenient
         if max_concurrent_shard_requests is not None:
             __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
-        if min_compatible_shard_node is not None:
-            __query["min_compatible_shard_node"] = min_compatible_shard_node
         if pre_filter_shard_size is not None:
             __query["pre_filter_shard_size"] = pre_filter_shard_size
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if q is not None:
             __query["q"] = q
         if request_cache is not None:
@@ -4968,6 +4756,8 @@ class Elasticsearch(BaseClient):
             __query["scroll"] = scroll
         if search_type is not None:
             __query["search_type"] = search_type
+        if source_exclude_vectors is not None:
+            __query["_source_exclude_vectors"] = source_exclude_vectors
         if source_excludes is not None:
             __query["_source_excludes"] = source_excludes
         if source_includes is not None:
@@ -5108,6 +4898,7 @@ class Elasticsearch(BaseClient):
         ] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         size: t.Optional[int] = None,
@@ -5135,51 +4926,6 @@ class Elasticsearch(BaseClient):
           <li>Optionally, a <code>geo_bounds</code> aggregation on the <code>&lt;field&gt;</code>. The search only includes this aggregation if the <code>exact_bounds</code> parameter is <code>true</code>.</li>
           <li>If the optional parameter <code>with_labels</code> is <code>true</code>, the internal search will include a dynamic runtime field that calls the <code>getLabelPosition</code> function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.</li>
           </ul>
-          <p>For example, Elasticsearch may translate a vector tile search API request with a <code>grid_agg</code> argument of <code>geotile</code> and an <code>exact_bounds</code> argument of <code>true</code> into the following search</p>
-          <pre><code>GET my-index/_search
-          {
-            &quot;size&quot;: 10000,
-            &quot;query&quot;: {
-              &quot;geo_bounding_box&quot;: {
-                &quot;my-geo-field&quot;: {
-                  &quot;top_left&quot;: {
-                    &quot;lat&quot;: -40.979898069620134,
-                    &quot;lon&quot;: -45
-                  },
-                  &quot;bottom_right&quot;: {
-                    &quot;lat&quot;: -66.51326044311186,
-                    &quot;lon&quot;: 0
-                  }
-                }
-              }
-            },
-            &quot;aggregations&quot;: {
-              &quot;grid&quot;: {
-                &quot;geotile_grid&quot;: {
-                  &quot;field&quot;: &quot;my-geo-field&quot;,
-                  &quot;precision&quot;: 11,
-                  &quot;size&quot;: 65536,
-                  &quot;bounds&quot;: {
-                    &quot;top_left&quot;: {
-                      &quot;lat&quot;: -40.979898069620134,
-                      &quot;lon&quot;: -45
-                    },
-                    &quot;bottom_right&quot;: {
-                      &quot;lat&quot;: -66.51326044311186,
-                      &quot;lon&quot;: 0
-                    }
-                  }
-                }
-              },
-              &quot;bounds&quot;: {
-                &quot;geo_bounds&quot;: {
-                  &quot;field&quot;: &quot;my-geo-field&quot;,
-                  &quot;wrap_longitude&quot;: false
-                }
-              }
-            }
-          }
-          </code></pre>
           <p>The API returns results as a binary Mapbox vector tile.
           Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:</p>
           <ul>
@@ -5434,9 +5180,10 @@ class Elasticsearch(BaseClient):
           Some cells may intersect more than one vector tile.
           To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
           Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.</p>
+          <p>Learn how to use the vector tile search API with practical examples in the <a href="https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search">Vector tile search examples</a> guide.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-vector-tile-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt>`_
 
         :param index: Comma-separated list of data streams, indices, or aliases to search
         :param field: Field containing geospatial data to return
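A rough sketch (not part of the diff) of fetching a single vector tile; the index, geo field, and tile coordinates are hypothetical, and the response body is the binary Mapbox vector tile (PBF) described above rather than JSON.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

# Fetch the tile at zoom 6, x=32, y=21 for a geo field (all values hypothetical).
resp = client.search_mvt(
    index="my-geo-index",
    field="location",
    zoom=6,
    x=32,
    y=21,
    grid_agg="geotile",
    grid_precision=8,
    exact_bounds=False,
)

# The response body is binary PBF data, not JSON.
with open("tile_6_32_21.pbf", "wb") as f:
    f.write(resp.body)
```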
@@ -5469,6 +5216,10 @@ class Elasticsearch(BaseClient):
            In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`,
            each feature is a polygon of the cell's bounding box. If `point`, each feature
             is a Point that is the centroid of the cell.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param query: The query DSL used to filter documents for the search.
         :param runtime_mappings: Defines one or more runtime fields in the search request.
             These fields take precedence over mapped fields with the same name.
@@ -5532,6 +5283,8 @@ class Elasticsearch(BaseClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if not __body:
             if aggs is not None:
                 __body["aggs"] = aggs
@@ -5595,6 +5348,7 @@ class Elasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
@@ -5609,7 +5363,7 @@ class Elasticsearch(BaseClient):
           <p>If the Elasticsearch security features are enabled, you must have the <code>view_index_metadata</code> or <code>manage</code> index privilege for the target data stream, index, or alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-shards.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams and indices,
@@ -5622,11 +5376,15 @@ class Elasticsearch(BaseClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param local: If `true`, the request retrieves information from the local node
             only.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never time out.
         :param preference: The node or shard the operation should be performed on. It
             is random by default.
         :param routing: A custom value used to route operations to a specific shard.
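A minimal sketch (not part of the diff) showing how the search shards API reports which shards (and nodes) a routed search would hit; the index and routing value are hypothetical.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

# Report the shard copies a search with this routing value would target.
resp = client.search_shards(index="my-index", routing="user-1")

for shard_group in resp["shards"]:
    for copy in shard_group:
        print(copy["index"], copy["shard"], copy["primary"], copy["node"])
```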
@@ -5653,6 +5411,8 @@ class Elasticsearch(BaseClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if local is not None:
             __query["local"] = local
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
@@ -5698,13 +5458,14 @@ class Elasticsearch(BaseClient):
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         rest_total_hits_as_int: t.Optional[bool] = None,
         routing: t.Optional[str] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
         ] = None,
-        source: t.Optional[str] = None,
+        source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None,
         typed_keys: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -5714,7 +5475,7 @@ class Elasticsearch(BaseClient):
           <p>Run a search with a search template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-template-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`).
@@ -5728,8 +5489,7 @@ class Elasticsearch(BaseClient):
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. Supports comma-separated
-            values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`,
-            `hidden`, `none`.
+            values, such as `open,hidden`.
         :param explain: If `true`, returns detailed information about score calculation
             as part of each hit. If you specify both this and the `explain` query parameter,
             the API uses only the query parameter.
@@ -5744,6 +5504,10 @@ class Elasticsearch(BaseClient):
         :param preference: The node or shard the operation should be performed on. It
             is random by default.
         :param profile: If `true`, the query execution is profiled.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param rest_total_hits_as_int: If `true`, `hits.total` is rendered as an integer
             in the response. If `false`, it is rendered as an object.
         :param routing: A custom value used to route operations to a specific shard.
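A minimal sketch (not part of the diff) of running a stored search template against an index; the template id and its parameters are hypothetical and depend on how the template was defined.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

resp = client.search_template(
    index="my-index",                 # hypothetical index
    id="my-search-template",          # hypothetical stored template id
    params={"query_string": "web servers", "from": 0, "size": 10},
)
print(resp["hits"]["total"])
```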
@@ -5785,6 +5549,8 @@ class Elasticsearch(BaseClient):
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if rest_total_hits_as_int is not None:
             __query["rest_total_hits_as_int"] = rest_total_hits_as_int
         if routing is not None:
@@ -5857,7 +5623,7 @@ class Elasticsearch(BaseClient):
           </blockquote>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-terms-enum.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum>`_
 
         :param index: A comma-separated list of data streams, indices, and index aliases
             to search. Wildcard (`*`) expressions are supported. To search all data streams
@@ -5911,11 +5677,7 @@ class Elasticsearch(BaseClient):
                 __body["string"] = string
             if timeout is not None:
                 __body["timeout"] = timeout
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -5927,7 +5689,20 @@ class Elasticsearch(BaseClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("doc", "filter", "per_field_analyzer"),
+        body_fields=(
+            "doc",
+            "field_statistics",
+            "fields",
+            "filter",
+            "offsets",
+            "payloads",
+            "per_field_analyzer",
+            "positions",
+            "routing",
+            "term_statistics",
+            "version",
+            "version_type",
+        ),
     )
     def termvectors(
         self,
@@ -5937,7 +5712,7 @@ class Elasticsearch(BaseClient):
         doc: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         field_statistics: t.Optional[bool] = None,
-        fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        fields: t.Optional[t.Sequence[str]] = None,
         filter: t.Optional[t.Mapping[str, t.Any]] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -5952,7 +5727,7 @@ class Elasticsearch(BaseClient):
         term_statistics: t.Optional[bool] = None,
         version: t.Optional[int] = None,
         version_type: t.Optional[
-            t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]
+            t.Union[str, t.Literal["external", "external_gte", "internal"]]
         ] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -5990,10 +5765,11 @@ class Elasticsearch(BaseClient):
           The information is only retrieved for the shard the requested document resides in.
           The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.
           By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
-          Use <code>routing</code> only to hit a particular shard.</p>
+          Use <code>routing</code> only to hit a particular shard.
+          Refer to the linked documentation for detailed examples of how to use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-termvectors.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors>`_
 
         :param index: The name of the index that contains the document.
         :param id: A unique identifier for the document.
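A minimal sketch (not part of the diff) of requesting term vectors for a stored document, using the statistics options that this change moves into the request body; the index, document id, and field names are hypothetical.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

resp = client.termvectors(
    index="my-index",     # hypothetical index
    id="1",               # hypothetical document id
    fields=["text"],      # now a list of field names (sent in the body)
    term_statistics=True,
    field_statistics=True,
    positions=True,
    offsets=True,
)
terms = resp["term_vectors"]["text"]["terms"]
for term, info in terms.items():
    print(term, info["term_freq"])
```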
@@ -6004,9 +5780,9 @@ class Elasticsearch(BaseClient):
             (the sum of document frequencies for all terms in this field). * The sum
             of total term frequencies (the sum of total term frequencies of each term
             in this field).
-        :param fields: A comma-separated list or wildcard expressions of fields to include
-            in the statistics. It is used as the default list unless a specific field
-            list is provided in the `completion_fields` or `fielddata_fields` parameters.
+        :param fields: A list of fields to include in the statistics. It is used as the
+            default list unless a specific field list is provided in the `completion_fields`
+            or `fielddata_fields` parameters.
         :param filter: Filter terms based on their tf-idf scores. This could be useful
             in order find out a good characteristic vector of a document. This feature
             works in a similar manner to the second phase of the More Like This Query.
@@ -6044,41 +5820,41 @@ class Elasticsearch(BaseClient):
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if error_trace is not None:
             __query["error_trace"] = error_trace
-        if field_statistics is not None:
-            __query["field_statistics"] = field_statistics
-        if fields is not None:
-            __query["fields"] = fields
         if filter_path is not None:
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
-        if offsets is not None:
-            __query["offsets"] = offsets
-        if payloads is not None:
-            __query["payloads"] = payloads
-        if positions is not None:
-            __query["positions"] = positions
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
         if realtime is not None:
             __query["realtime"] = realtime
-        if routing is not None:
-            __query["routing"] = routing
-        if term_statistics is not None:
-            __query["term_statistics"] = term_statistics
-        if version is not None:
-            __query["version"] = version
-        if version_type is not None:
-            __query["version_type"] = version_type
         if not __body:
             if doc is not None:
                 __body["doc"] = doc
+            if field_statistics is not None:
+                __body["field_statistics"] = field_statistics
+            if fields is not None:
+                __body["fields"] = fields
             if filter is not None:
                 __body["filter"] = filter
+            if offsets is not None:
+                __body["offsets"] = offsets
+            if payloads is not None:
+                __body["payloads"] = payloads
             if per_field_analyzer is not None:
                 __body["per_field_analyzer"] = per_field_analyzer
+            if positions is not None:
+                __body["positions"] = positions
+            if routing is not None:
+                __body["routing"] = routing
+            if term_statistics is not None:
+                __body["term_statistics"] = term_statistics
+            if version is not None:
+                __body["version"] = version
+            if version_type is not None:
+                __body["version_type"] = version_type
         if not __body:
             __body = None  # type: ignore[assignment]
         __headers = {"accept": "application/json"}
@@ -6123,6 +5899,7 @@ class Elasticsearch(BaseClient):
         human: t.Optional[bool] = None,
         if_primary_term: t.Optional[int] = None,
         if_seq_no: t.Optional[int] = None,
+        include_source_on_error: t.Optional[bool] = None,
         lang: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         refresh: t.Optional[
@@ -6160,10 +5937,11 @@ class Elasticsearch(BaseClient):
           </ul>
           <p>The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.</p>
           <p>The <code>_source</code> field must be enabled to use this API.
-          In addition to <code>_source</code>, you can access the following variables through the <code>ctx</code> map: <code>_index</code>, <code>_type</code>, <code>_id</code>, <code>_version</code>, <code>_routing</code>, and <code>_now</code> (the current timestamp).</p>
+          In addition to <code>_source</code>, you can access the following variables through the <code>ctx</code> map: <code>_index</code>, <code>_type</code>, <code>_id</code>, <code>_version</code>, <code>_routing</code>, and <code>_now</code> (the current timestamp).
+          For usage examples such as partial updates, upserts, and scripted updates, see the External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-update.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update>`_
 
         :param index: The name of the target index. By default, the index is created
             automatically if it doesn't exist.
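A minimal sketch (not part of the diff) of the scripted-update and partial-update patterns referenced in the docstring above; the index, document id, and fields are hypothetical.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

# Increment a counter via the `ctx` map; create the document if it does not exist yet.
client.update(
    index="my-index",
    id="1",
    script={
        "source": "ctx._source.counter += params.count",
        "lang": "painless",
        "params": {"count": 1},
    },
    upsert={"counter": 1},
)

# A partial (doc merge) update of specific fields.
client.update(index="my-index", id="1", doc={"tags": ["updated"]})
```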
@@ -6178,6 +5956,8 @@ class Elasticsearch(BaseClient):
             term.
         :param if_seq_no: Only perform the operation if the document has this sequence
             number.
+        :param include_source_on_error: If `true`, the document source is included
+            in the error message in case of a parsing error.
         :param lang: The script language.
         :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
             this operation visible to search. If 'wait_for', it waits for a refresh to
@@ -6222,6 +6002,8 @@ class Elasticsearch(BaseClient):
             __query["if_primary_term"] = if_primary_term
         if if_seq_no is not None:
             __query["if_seq_no"] = if_seq_no
+        if include_source_on_error is not None:
+            __query["include_source_on_error"] = include_source_on_error
         if lang is not None:
             __query["lang"] = lang
         if pretty is not None:
@@ -6349,6 +6131,24 @@ class Elasticsearch(BaseClient):
           A bulk update request is performed for each batch of matching documents.
           Any query or update failures cause the update by query request to fail and the failures are shown in the response.
           Any update requests that completed successfully still stick, they are not rolled back.</p>
+          <p><strong>Refreshing shards</strong></p>
+          <p>Specifying the <code>refresh</code> parameter refreshes all shards once the request completes.
+          This is different to the update API's <code>refresh</code> parameter, which causes only the shard
+          that received the request to be refreshed. Unlike the update API, it does not support
+          <code>wait_for</code>.</p>
+          <p><strong>Running update by query asynchronously</strong></p>
+          <p>If the request contains <code>wait_for_completion=false</code>, Elasticsearch
+          performs some preflight checks, launches the request, and returns a
+          <a href="https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks">task</a> you can use to cancel or get the status of the task.
+          Elasticsearch creates a record of this task as a document at <code>.tasks/task/${taskId}</code>.</p>
+          <p><strong>Waiting for active shards</strong></p>
+          <p><code>wait_for_active_shards</code> controls how many copies of a shard must be active
+          before proceeding with the request. See <a href="https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards"><code>wait_for_active_shards</code></a>
+          for details. <code>timeout</code> controls how long each write request waits for unavailable
+          shards to become available. Both work exactly the way they work in the
+          <a href="https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk">Bulk API</a>. Update by query uses scrolled searches, so you can also
+          specify the <code>scroll</code> parameter to control how long it keeps the search context
+          alive, for example <code>?scroll=10m</code>. The default is 5 minutes.</p>
           <p><strong>Throttling update requests</strong></p>
           <p>To control the rate at which update by query issues batches of update operations, you can set <code>requests_per_second</code> to any positive decimal number.
           This pads each batch with a wait time to throttle the rate.
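As a rough illustration (not part of the diff), a minimal sketch of an asynchronous, throttled update by query that combines several of the options described above (`conflicts`, `requests_per_second`, `refresh`, `wait_for_completion`); the index, query, and script are hypothetical.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

resp = client.update_by_query(
    index="my-index",                        # hypothetical index
    query={"term": {"user.id": "kimchy"}},   # hypothetical query
    script={"source": "ctx._source.count++", "lang": "painless"},
    conflicts="proceed",        # keep going on version conflicts
    requests_per_second=100,    # throttle the update batches
    refresh=True,               # refresh all shards once the request completes
    wait_for_completion=False,  # run as a background task
)
task_id = resp["task"]

# Poll (or cancel) the task through the task management APIs.
print(client.tasks.get(task_id=task_id))
```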
@@ -6383,21 +6183,11 @@ class Elasticsearch(BaseClient):
           <li>Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</li>
           <li>Update performance scales linearly across available resources with the number of slices.</li>
           </ul>
-          <p>Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
-          <p><strong>Update the document source</strong></p>
-          <p>Update by query supports scripts to update the document source.
-          As with the update API, you can set <code>ctx.op</code> to change the operation that is performed.</p>
-          <p>Set <code>ctx.op = &quot;noop&quot;</code> if your script decides that it doesn't have to make any changes.
-          The update by query operation skips updating the document and increments the <code>noop</code> counter.</p>
-          <p>Set <code>ctx.op = &quot;delete&quot;</code> if your script decides that the document should be deleted.
-          The update by query operation deletes the document and increments the <code>deleted</code> counter.</p>
-          <p>Update by query supports only <code>index</code>, <code>noop</code>, and <code>delete</code>.
-          Setting <code>ctx.op</code> to anything else is an error.
-          Setting any other field in <code>ctx</code> is an error.
-          This API enables you to only modify the source of matching documents; you cannot move them.</p>
+          <p>Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
+          Refer to the linked documentation for examples of how to update documents using the <code>_update_by_query</code> API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-update-by-query.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             search. It supports wildcards (`*`). To search all data streams or indices,
@@ -6413,8 +6203,8 @@ class Elasticsearch(BaseClient):
             be used only when the `q` query string parameter is specified.
         :param conflicts: The preferred behavior when update by query hits version conflicts:
             `abort` or `proceed`.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`. This parameter can be used only when the `q` query string parameter
+        :param default_operator: The default operator for query string query: `and` or
+            `or`. This parameter can be used only when the `q` query string parameter
             is specified.
         :param df: The field to use as default where no field prefix is given in the
             query string. This parameter can be used only when the `q` query string parameter
@@ -6422,9 +6212,8 @@ class Elasticsearch(BaseClient):
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
-            values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`,
-            `hidden`, `none`.
-        :param from_: Starting offset (default: 0)
+            values, such as `open,hidden`.
+        :param from_: Skips the specified number of documents.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param lenient: If `true`, format-based query failures (such as providing text
@@ -6617,7 +6406,7 @@ class Elasticsearch(BaseClient):
           Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/docs-update-by-query.html#docs-update-by-query-rethrottle>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle>`_
 
         :param task_id: The ID for the task.
         :param requests_per_second: The throttle for this request in sub-requests per
diff -pruN 8.17.2-2/elasticsearch/_sync/client/_base.py 9.2.0-1/elasticsearch/_sync/client/_base.py
--- 8.17.2-2/elasticsearch/_sync/client/_base.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -174,7 +174,7 @@ def create_sniff_callback(
                     "GET",
                     "/_nodes/_all/http",
                     headers={
-                        "accept": "application/vnd.elasticsearch+json; compatible-with=8"
+                        "accept": "application/vnd.elasticsearch+json; compatible-with=9"
                     },
                     request_timeout=(
                         sniff_options.sniff_timeout
@@ -298,7 +298,6 @@ class BaseClient:
         def mimetype_header_to_compat(header: str) -> None:
             # Converts all parts of a Accept/Content-Type headers
             # from application/X -> application/vnd.elasticsearch+X
-            nonlocal request_headers
             mimetype = request_headers.get(header, None)
             if mimetype:
                 request_headers[header] = _COMPAT_MIMETYPE_RE.sub(
diff -pruN 8.17.2-2/elasticsearch/_sync/client/async_search.py 9.2.0-1/elasticsearch/_sync/client/async_search.py
--- 8.17.2-2/elasticsearch/_sync/client/async_search.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/async_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class AsyncSearchClient(NamespacedClient
           If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the <code>cancel_task</code> cluster privilege.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param id: A unique identifier for the async search.
         """
@@ -94,11 +94,11 @@ class AsyncSearchClient(NamespacedClient
           If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param id: A unique identifier for the async search.
-        :param keep_alive: Specifies how long the async search should be available in
-            the cluster. When not specified, the `keep_alive` set with the corresponding
+        :param keep_alive: The length of time that the async search should be available
+            in the cluster. When not specified, the `keep_alive` set with the corresponding
             submit async request will be used. Otherwise, it is possible to override
             the value and extend the validity of the request. When this period expires,
             the search, if still running, is cancelled. If the search is completed, its
@@ -157,13 +157,17 @@ class AsyncSearchClient(NamespacedClient
 
           <p>Get the async search status.</p>
           <p>Get the status of a previously submitted async search request given its identifier, without retrieving search results.
-          If the Elasticsearch security features are enabled, use of this API is restricted to the <code>monitoring_user</code> role.</p>
+          If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to:</p>
+          <ul>
+          <li>The user or API key that submitted the original async search request.</li>
+          <li>Users that have the <code>monitor</code> cluster privilege or greater privileges.</li>
+          </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param id: A unique identifier for the async search.
-        :param keep_alive: Specifies how long the async search needs to be available.
+        :param keep_alive: The length of time that the async search needs to be available.
             Ongoing async searches and any saved search results are deleted after this
             period.
         """
@@ -277,13 +281,13 @@ class AsyncSearchClient(NamespacedClient
         ] = None,
         lenient: t.Optional[bool] = None,
         max_concurrent_shard_requests: t.Optional[int] = None,
-        min_compatible_shard_node: t.Optional[str] = None,
         min_score: t.Optional[float] = None,
         pit: t.Optional[t.Mapping[str, t.Any]] = None,
         post_filter: t.Optional[t.Mapping[str, t.Any]] = None,
         preference: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         q: t.Optional[str] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         request_cache: t.Optional[bool] = None,
@@ -295,7 +299,7 @@ class AsyncSearchClient(NamespacedClient
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -342,7 +346,7 @@ class AsyncSearchClient(NamespacedClient
           The maximum allowed size for a stored async search response can be set by changing the <code>search.max_async_search_response_size</code> cluster level setting.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/async-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit>`_
 
         :param index: A comma-separated list of index names to search; use `_all` or
             empty string to perform the operation on all indices
@@ -397,15 +401,18 @@ class AsyncSearchClient(NamespacedClient
             per node this search executes concurrently. This value should be used to
             limit the impact of the search on the cluster in order to limit the number
             of concurrent shard requests
-        :param min_compatible_shard_node:
         :param min_score: Minimum _score for matching documents. Documents with a lower
-            _score are not included in the search results.
+            _score are not included in search results or in results collected by aggregations.
         :param pit: Limits the search to a point in time (PIT). If you provide a PIT,
             you cannot specify an <index> in the request path.
         :param post_filter:
         :param preference: Specify the node or shard the operation should be performed
             on (default: random)
         :param profile:
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param q: Query in the Lucene query string syntax
         :param query: Defines the search definition using the Query DSL.
         :param request_cache: Specify if request cache should be used for this request
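A rough sketch (not part of the diff) of the async search lifecycle using the `submit`, `status`, `get`, and `delete` methods shown in this file; the index, query, and timeout values are hypothetical, and `wait_for_completion_timeout`/`keep_on_completion` are assumed submit options rather than parameters confirmed by this hunk.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumption: local, unsecured cluster

# Submit the search; return early if it is still running after 2 seconds.
resp = client.async_search.submit(
    index="my-index",
    query={"match_all": {}},
    wait_for_completion_timeout="2s",  # assumed submit option
    keep_on_completion=True,           # assumed submit option: keep results even if it finishes early
)
search_id = resp["id"]

# Check progress without fetching results, then fetch them and finally clean up.
print(client.async_search.status(id=search_id))
results = client.async_search.get(id=search_id, keep_alive="5m")
client.async_search.delete(id=search_id)
```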
@@ -522,12 +529,12 @@ class AsyncSearchClient(NamespacedClient
             __query["lenient"] = lenient
         if max_concurrent_shard_requests is not None:
             __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
-        if min_compatible_shard_node is not None:
-            __query["min_compatible_shard_node"] = min_compatible_shard_node
         if preference is not None:
             __query["preference"] = preference
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if q is not None:
             __query["q"] = q
         if request_cache is not None:
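
The submit() hunk above removes min_compatible_shard_node and adds project_routing. A minimal usage sketch, assuming a serverless project endpoint, API key, and logs-* index that are all placeholders; per the docstring, project_routing is only honoured on serverless:

    from elasticsearch import Elasticsearch

    # Hypothetical serverless endpoint and API key; replace with real values.
    client = Elasticsearch(
        "https://my-project.es.example.io:443",
        api_key="REDACTED",
    )

    # Submit an async search scoped to projects whose alias starts with "prod".
    # project_routing is the parameter added in 9.2.0; it is serverless-only.
    resp = client.async_search.submit(
        index="logs-*",
        query={"match_all": {}},
        project_routing="_alias:prod*",
        keep_on_completion=True,
    )
    print(resp["id"], resp["is_running"])
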
diff -pruN 8.17.2-2/elasticsearch/_sync/client/autoscaling.py 9.2.0-1/elasticsearch/_sync/client/autoscaling.py
--- 8.17.2-2/elasticsearch/_sync/client/autoscaling.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/autoscaling.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class AutoscalingClient(NamespacedClient
           <p>NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-delete-autoscaling-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy>`_
 
         :param name: the name of the autoscaling policy
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -104,7 +104,7 @@ class AutoscalingClient(NamespacedClient
           Do not use this information to make autoscaling decisions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-get-autoscaling-capacity.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity>`_
 
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -151,7 +151,7 @@ class AutoscalingClient(NamespacedClient
           <p>NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-get-autoscaling-capacity.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity>`_
 
         :param name: the name of the autoscaling policy
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -206,7 +206,7 @@ class AutoscalingClient(NamespacedClient
           <p>NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/autoscaling-put-autoscaling-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy>`_
 
         :param name: the name of the autoscaling policy
         :param policy:
diff -pruN 8.17.2-2/elasticsearch/_sync/client/cat.py 9.2.0-1/elasticsearch/_sync/client/cat.py
--- 8.17.2-2/elasticsearch/_sync/client/cat.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/cat.py	2025-10-28 16:50:53.000000000 +0000
@@ -36,6 +36,9 @@ class CatClient(NamespacedClient):
         self,
         *,
         name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         expand_wildcards: t.Optional[
             t.Union[
@@ -47,13 +50,42 @@ class CatClient(NamespacedClient):
         ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "alias",
+                            "filter",
+                            "index",
+                            "is_write_index",
+                            "routing.index",
+                            "routing.search",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "alias",
+                        "filter",
+                        "index",
+                        "is_write_index",
+                        "routing.index",
+                        "routing.search",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -65,23 +97,28 @@ class CatClient(NamespacedClient):
           <p>IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-alias.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases>`_
 
         :param name: A comma-separated list of aliases to retrieve. Supports wildcards
             (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param expand_wildcards: The type of index that wildcard patterns can match.
             If the request can target data streams, this argument determines whether
             wildcard expressions match hidden data streams. It supports comma-separated
             values, such as `open,hidden`.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
-        :param local: If `true`, the request computes the list of selected nodes from
-            the local cluster state. If `false` the list of selected nodes are computed
-            from the cluster state of the master node. In both cases the coordinating
-            node will send requests for further information to each selected node.
         :param master_timeout: The period to wait for a connection to the master node.
             If the master node is not available before the timeout expires, the request
             fails and returns an error. To indicate that the request should never time out,
@@ -89,6 +126,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -99,6 +142,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/aliases"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if expand_wildcards is not None:
@@ -113,14 +158,14 @@ class CatClient(NamespacedClient):
             __query["help"] = help
         if human is not None:
             __query["human"] = human
-        if local is not None:
-            __query["local"] = local
         if master_timeout is not None:
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -144,13 +189,57 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "disk.avail",
+                            "disk.indices",
+                            "disk.indices.forecast",
+                            "disk.percent",
+                            "disk.total",
+                            "disk.used",
+                            "host",
+                            "ip",
+                            "node",
+                            "node.role",
+                            "shards",
+                            "shards.undesired",
+                            "write_load.forecast",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "disk.avail",
+                        "disk.indices",
+                        "disk.indices.forecast",
+                        "disk.percent",
+                        "disk.total",
+                        "disk.used",
+                        "host",
+                        "ip",
+                        "node",
+                        "node.role",
+                        "shards",
+                        "shards.undesired",
+                        "write_load.forecast",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -161,14 +250,22 @@ class CatClient(NamespacedClient):
           <p>IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-allocation.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation>`_
 
         :param node_id: A comma-separated list of node identifiers or names used to limit
             the returned information.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -179,6 +276,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -211,6 +314,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -228,16 +333,51 @@ class CatClient(NamespacedClient):
         self,
         *,
         name: t.Optional[str] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "alias_count",
+                            "included_in",
+                            "mapping_count",
+                            "metadata_count",
+                            "name",
+                            "settings_count",
+                            "version",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "alias_count",
+                        "included_in",
+                        "mapping_count",
+                        "metadata_count",
+                        "name",
+                        "settings_count",
+                        "version",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -250,13 +390,22 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use the get component template API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-component-templates.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates>`_
 
         :param name: The name of the component template. It accepts wildcard expressions.
             If it is omitted, all component templates are returned.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -267,6 +416,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -277,6 +432,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/component_templates"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -297,6 +454,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -314,14 +473,26 @@ class CatClient(NamespacedClient):
         self,
         *,
         index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[t.Union[str, t.Literal["count", "epoch", "timestamp"]]],
+                t.Union[str, t.Literal["count", "epoch", "timestamp"]],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -334,19 +505,38 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use the count API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-count.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. It supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the _alias tag and a single value (possibly wildcarded). Examples:
+            _alias:my-project, _alias:_origin, _alias:*pr*. Supported in serverless only.
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -357,6 +547,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/count"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -371,8 +563,12 @@ class CatClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -396,11 +592,21 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]]
+                ],
+                t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -412,19 +618,33 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use the nodes stats API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-fielddata.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata>`_
 
         :param fields: Comma-separated list of fields used to limit returned information.
             To retrieve all fields, omit this parameter.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -453,6 +673,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -469,10 +691,58 @@ class CatClient(NamespacedClient):
     def health(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "active_shards_percent",
+                            "cluster",
+                            "epoch",
+                            "init",
+                            "max_task_wait_time",
+                            "node.data",
+                            "node.total",
+                            "pending_tasks",
+                            "pri",
+                            "relo",
+                            "shards",
+                            "status",
+                            "timestamp",
+                            "unassign",
+                            "unassign.pri",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "active_shards_percent",
+                        "cluster",
+                        "epoch",
+                        "init",
+                        "max_task_wait_time",
+                        "node.data",
+                        "node.total",
+                        "pending_tasks",
+                        "pri",
+                        "relo",
+                        "shards",
+                        "status",
+                        "timestamp",
+                        "unassign",
+                        "unassign.pri",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
@@ -498,23 +768,39 @@ class CatClient(NamespacedClient):
           You also can use the API to track the recovery of a large cluster over a longer period of time.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-health.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param ts: If true, returns `HH:MM:SS` and Unix epoch timestamps.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/health"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -556,7 +842,7 @@ class CatClient(NamespacedClient):
           <p>Get help for the CAT APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat"
@@ -590,8 +876,319 @@ class CatClient(NamespacedClient):
         ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        health: t.Optional[t.Union[str, t.Literal["green", "red", "yellow"]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "bulk.avg_size_in_bytes",
+                            "bulk.avg_time",
+                            "bulk.total_operations",
+                            "bulk.total_size_in_bytes",
+                            "bulk.total_time",
+                            "completion.size",
+                            "creation.date",
+                            "creation.date.string",
+                            "dataset.size",
+                            "dense_vector.value_count",
+                            "docs.count",
+                            "docs.deleted",
+                            "fielddata.evictions",
+                            "fielddata.memory_size",
+                            "flush.total",
+                            "flush.total_time",
+                            "get.current",
+                            "get.exists_time",
+                            "get.exists_total",
+                            "get.missing_time",
+                            "get.missing_total",
+                            "get.time",
+                            "get.total",
+                            "health",
+                            "index",
+                            "indexing.delete_current",
+                            "indexing.delete_time",
+                            "indexing.delete_total",
+                            "indexing.index_current",
+                            "indexing.index_failed",
+                            "indexing.index_failed_due_to_version_conflict",
+                            "indexing.index_time",
+                            "indexing.index_total",
+                            "memory.total",
+                            "merges.current",
+                            "merges.current_docs",
+                            "merges.current_size",
+                            "merges.total",
+                            "merges.total_docs",
+                            "merges.total_size",
+                            "merges.total_time",
+                            "pri",
+                            "pri.bulk.avg_size_in_bytes",
+                            "pri.bulk.avg_time",
+                            "pri.bulk.total_operations",
+                            "pri.bulk.total_size_in_bytes",
+                            "pri.bulk.total_time",
+                            "pri.completion.size",
+                            "pri.dense_vector.value_count",
+                            "pri.fielddata.evictions",
+                            "pri.fielddata.memory_size",
+                            "pri.flush.total",
+                            "pri.flush.total_time",
+                            "pri.get.current",
+                            "pri.get.exists_time",
+                            "pri.get.exists_total",
+                            "pri.get.missing_time",
+                            "pri.get.missing_total",
+                            "pri.get.time",
+                            "pri.get.total",
+                            "pri.indexing.delete_current",
+                            "pri.indexing.delete_time",
+                            "pri.indexing.delete_total",
+                            "pri.indexing.index_current",
+                            "pri.indexing.index_failed",
+                            "pri.indexing.index_failed_due_to_version_conflict",
+                            "pri.indexing.index_time",
+                            "pri.indexing.index_total",
+                            "pri.memory.total",
+                            "pri.merges.current",
+                            "pri.merges.current_docs",
+                            "pri.merges.current_size",
+                            "pri.merges.total",
+                            "pri.merges.total_docs",
+                            "pri.merges.total_size",
+                            "pri.merges.total_time",
+                            "pri.query_cache.evictions",
+                            "pri.query_cache.memory_size",
+                            "pri.refresh.external_time",
+                            "pri.refresh.external_total",
+                            "pri.refresh.listeners",
+                            "pri.refresh.time",
+                            "pri.refresh.total",
+                            "pri.request_cache.evictions",
+                            "pri.request_cache.hit_count",
+                            "pri.request_cache.memory_size",
+                            "pri.request_cache.miss_count",
+                            "pri.search.fetch_current",
+                            "pri.search.fetch_time",
+                            "pri.search.fetch_total",
+                            "pri.search.open_contexts",
+                            "pri.search.query_current",
+                            "pri.search.query_time",
+                            "pri.search.query_total",
+                            "pri.search.scroll_current",
+                            "pri.search.scroll_time",
+                            "pri.search.scroll_total",
+                            "pri.segments.count",
+                            "pri.segments.fixed_bitset_memory",
+                            "pri.segments.index_writer_memory",
+                            "pri.segments.memory",
+                            "pri.segments.version_map_memory",
+                            "pri.sparse_vector.value_count",
+                            "pri.store.size",
+                            "pri.suggest.current",
+                            "pri.suggest.time",
+                            "pri.suggest.total",
+                            "pri.warmer.current",
+                            "pri.warmer.total",
+                            "pri.warmer.total_time",
+                            "query_cache.evictions",
+                            "query_cache.memory_size",
+                            "refresh.external_time",
+                            "refresh.external_total",
+                            "refresh.listeners",
+                            "refresh.time",
+                            "refresh.total",
+                            "rep",
+                            "request_cache.evictions",
+                            "request_cache.hit_count",
+                            "request_cache.memory_size",
+                            "request_cache.miss_count",
+                            "search.fetch_current",
+                            "search.fetch_time",
+                            "search.fetch_total",
+                            "search.open_contexts",
+                            "search.query_current",
+                            "search.query_time",
+                            "search.query_total",
+                            "search.scroll_current",
+                            "search.scroll_time",
+                            "search.scroll_total",
+                            "segments.count",
+                            "segments.fixed_bitset_memory",
+                            "segments.index_writer_memory",
+                            "segments.memory",
+                            "segments.version_map_memory",
+                            "sparse_vector.value_count",
+                            "status",
+                            "store.size",
+                            "suggest.current",
+                            "suggest.time",
+                            "suggest.total",
+                            "uuid",
+                            "warmer.current",
+                            "warmer.total",
+                            "warmer.total_time",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "bulk.avg_size_in_bytes",
+                        "bulk.avg_time",
+                        "bulk.total_operations",
+                        "bulk.total_size_in_bytes",
+                        "bulk.total_time",
+                        "completion.size",
+                        "creation.date",
+                        "creation.date.string",
+                        "dataset.size",
+                        "dense_vector.value_count",
+                        "docs.count",
+                        "docs.deleted",
+                        "fielddata.evictions",
+                        "fielddata.memory_size",
+                        "flush.total",
+                        "flush.total_time",
+                        "get.current",
+                        "get.exists_time",
+                        "get.exists_total",
+                        "get.missing_time",
+                        "get.missing_total",
+                        "get.time",
+                        "get.total",
+                        "health",
+                        "index",
+                        "indexing.delete_current",
+                        "indexing.delete_time",
+                        "indexing.delete_total",
+                        "indexing.index_current",
+                        "indexing.index_failed",
+                        "indexing.index_failed_due_to_version_conflict",
+                        "indexing.index_time",
+                        "indexing.index_total",
+                        "memory.total",
+                        "merges.current",
+                        "merges.current_docs",
+                        "merges.current_size",
+                        "merges.total",
+                        "merges.total_docs",
+                        "merges.total_size",
+                        "merges.total_time",
+                        "pri",
+                        "pri.bulk.avg_size_in_bytes",
+                        "pri.bulk.avg_time",
+                        "pri.bulk.total_operations",
+                        "pri.bulk.total_size_in_bytes",
+                        "pri.bulk.total_time",
+                        "pri.completion.size",
+                        "pri.dense_vector.value_count",
+                        "pri.fielddata.evictions",
+                        "pri.fielddata.memory_size",
+                        "pri.flush.total",
+                        "pri.flush.total_time",
+                        "pri.get.current",
+                        "pri.get.exists_time",
+                        "pri.get.exists_total",
+                        "pri.get.missing_time",
+                        "pri.get.missing_total",
+                        "pri.get.time",
+                        "pri.get.total",
+                        "pri.indexing.delete_current",
+                        "pri.indexing.delete_time",
+                        "pri.indexing.delete_total",
+                        "pri.indexing.index_current",
+                        "pri.indexing.index_failed",
+                        "pri.indexing.index_failed_due_to_version_conflict",
+                        "pri.indexing.index_time",
+                        "pri.indexing.index_total",
+                        "pri.memory.total",
+                        "pri.merges.current",
+                        "pri.merges.current_docs",
+                        "pri.merges.current_size",
+                        "pri.merges.total",
+                        "pri.merges.total_docs",
+                        "pri.merges.total_size",
+                        "pri.merges.total_time",
+                        "pri.query_cache.evictions",
+                        "pri.query_cache.memory_size",
+                        "pri.refresh.external_time",
+                        "pri.refresh.external_total",
+                        "pri.refresh.listeners",
+                        "pri.refresh.time",
+                        "pri.refresh.total",
+                        "pri.request_cache.evictions",
+                        "pri.request_cache.hit_count",
+                        "pri.request_cache.memory_size",
+                        "pri.request_cache.miss_count",
+                        "pri.search.fetch_current",
+                        "pri.search.fetch_time",
+                        "pri.search.fetch_total",
+                        "pri.search.open_contexts",
+                        "pri.search.query_current",
+                        "pri.search.query_time",
+                        "pri.search.query_total",
+                        "pri.search.scroll_current",
+                        "pri.search.scroll_time",
+                        "pri.search.scroll_total",
+                        "pri.segments.count",
+                        "pri.segments.fixed_bitset_memory",
+                        "pri.segments.index_writer_memory",
+                        "pri.segments.memory",
+                        "pri.segments.version_map_memory",
+                        "pri.sparse_vector.value_count",
+                        "pri.store.size",
+                        "pri.suggest.current",
+                        "pri.suggest.time",
+                        "pri.suggest.total",
+                        "pri.warmer.current",
+                        "pri.warmer.total",
+                        "pri.warmer.total_time",
+                        "query_cache.evictions",
+                        "query_cache.memory_size",
+                        "refresh.external_time",
+                        "refresh.external_total",
+                        "refresh.listeners",
+                        "refresh.time",
+                        "refresh.total",
+                        "rep",
+                        "request_cache.evictions",
+                        "request_cache.hit_count",
+                        "request_cache.memory_size",
+                        "request_cache.miss_count",
+                        "search.fetch_current",
+                        "search.fetch_time",
+                        "search.fetch_total",
+                        "search.open_contexts",
+                        "search.query_current",
+                        "search.query_time",
+                        "search.query_total",
+                        "search.scroll_current",
+                        "search.scroll_time",
+                        "search.scroll_total",
+                        "segments.count",
+                        "segments.fixed_bitset_memory",
+                        "segments.index_writer_memory",
+                        "segments.memory",
+                        "segments.version_map_memory",
+                        "sparse_vector.value_count",
+                        "status",
+                        "store.size",
+                        "suggest.current",
+                        "suggest.time",
+                        "suggest.total",
+                        "uuid",
+                        "warmer.current",
+                        "warmer.total",
+                        "warmer.total_time",
+                    ],
+                ],
+            ]
+        ] = None,
+        health: t.Optional[
+            t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         include_unloaded_segments: t.Optional[bool] = None,
@@ -623,16 +1220,24 @@ class CatClient(NamespacedClient):
           They are not intended for use by applications. For application consumption, use an index endpoint.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-indices.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param expand_wildcards: The type of index that wildcard patterns can match.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param health: The health status used to limit returned indices. By default,
             the response includes indices of any health status.
         :param help: When set to `true` will output available columns. This option can't
@@ -644,7 +1249,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
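
cat.indices carries the largest change: h becomes a typed union over every known column and health gains the unavailable and unknown states. A sketch combining the typed columns with fixed byte units; the URL and index pattern are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Yellow indices only, selected columns, store.size as whole megabytes.
    indices = client.cat.indices(
        index="logs-*",
        health="yellow",
        h=["index", "health", "docs.count", "store.size"],
        bytes="mb",
        s="store.size:desc",
        format="json",
    )
    for row in indices:
        print(row["index"], row["store.size"])
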
@@ -701,16 +1311,27 @@ class CatClient(NamespacedClient):
     def master(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[t.Union[str, t.Literal["host", "id", "ip", "node"]]],
+                t.Union[str, t.Literal["host", "id", "ip", "node"]],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -721,11 +1342,20 @@ class CatClient(NamespacedClient):
           <p>IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-master.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -736,11 +1366,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/master"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -761,6 +1399,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -899,12 +1539,19 @@ class CatClient(NamespacedClient):
           application consumption, use the get data frame analytics jobs statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics>`_
 
         :param id: The ID of the data frame analytics to fetch
         :param allow_no_match: Whether to ignore if a wildcard expression matches no
-            configs. (This includes `_all` string or when no configs have been specified)
-        :param bytes: The unit in which to display byte values
+            configs. (This includes the `_all` string or when no configs have been specified.)
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: Comma-separated list of column names to display.
@@ -912,7 +1559,12 @@ class CatClient(NamespacedClient):
             be combined with any other query string option.
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -963,6 +1615,9 @@ class CatClient(NamespacedClient):
         *,
         datafeed_id: t.Optional[str] = None,
         allow_no_match: t.Optional[bool] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
@@ -1067,7 +1722,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get datafeed statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-datafeeds.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed.
@@ -1078,6 +1733,14 @@ class CatClient(NamespacedClient):
             array when there are no matches and the subset of results when there are
             partial matches. If `false`, the API returns a 404 status code when there
             are no matches or only partial matches.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: Comma-separated list of column names to display.
@@ -1085,7 +1748,12 @@ class CatClient(NamespacedClient):
             be combined with any other query string option.
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -1098,6 +1766,8 @@ class CatClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if allow_no_match is not None:
             __query["allow_no_match"] = allow_no_match
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -1433,7 +2103,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get anomaly detection job statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-anomaly-detectors.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param allow_no_match: Specifies what to do when the request: * Contains wildcard
@@ -1443,7 +2113,14 @@ class CatClient(NamespacedClient):
             array when there are no matches and the subset of results when there are
             partial matches. If `false`, the API returns a 404 status code when there
             are no matches or only partial matches.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: Comma-separated list of column names to display.
@@ -1451,7 +2128,12 @@ class CatClient(NamespacedClient):
             be combined with any other query string option.
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -1618,7 +2300,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get trained models statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-trained-model.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models>`_
 
         :param model_id: A unique identifier for the trained model.
         :param allow_no_match: Specifies what to do when the request: contains wildcard
@@ -1628,7 +2310,14 @@ class CatClient(NamespacedClient):
             when there are no matches and the subset of results when there are partial
             matches. If `false`, the API returns a 404 status code when there are no
             matches or only partial matches.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param from_: Skips the specified number of transforms.
@@ -1638,7 +2327,12 @@ class CatClient(NamespacedClient):
         :param s: A comma-separated list of column names or aliases used to sort the
             response.
         :param size: The maximum number of transforms to display.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -1691,16 +2385,39 @@ class CatClient(NamespacedClient):
     def nodeattrs(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "attr", "host", "id", "ip", "node", "pid", "port", "value"
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "attr", "host", "id", "ip", "node", "pid", "port", "value"
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -1711,11 +2428,20 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-nodeattrs.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -1726,11 +2452,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/nodeattrs"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -1751,6 +2485,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -1773,8 +2509,201 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        full_id: t.Optional[t.Union[bool, str]] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        full_id: t.Optional[bool] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "build",
+                            "completion.size",
+                            "cpu",
+                            "disk.avail",
+                            "disk.total",
+                            "disk.used",
+                            "disk.used_percent",
+                            "fielddata.evictions",
+                            "fielddata.memory_size",
+                            "file_desc.current",
+                            "file_desc.max",
+                            "file_desc.percent",
+                            "flush.total",
+                            "flush.total_time",
+                            "get.current",
+                            "get.exists_time",
+                            "get.exists_total",
+                            "get.missing_time",
+                            "get.missing_total",
+                            "get.time",
+                            "get.total",
+                            "heap.current",
+                            "heap.max",
+                            "heap.percent",
+                            "http_address",
+                            "id",
+                            "indexing.delete_current",
+                            "indexing.delete_time",
+                            "indexing.delete_total",
+                            "indexing.index_current",
+                            "indexing.index_failed",
+                            "indexing.index_failed_due_to_version_conflict",
+                            "indexing.index_time",
+                            "indexing.index_total",
+                            "ip",
+                            "jdk",
+                            "load_15m",
+                            "load_1m",
+                            "load_5m",
+                            "mappings.total_count",
+                            "mappings.total_estimated_overhead_in_bytes",
+                            "master",
+                            "merges.current",
+                            "merges.current_docs",
+                            "merges.current_size",
+                            "merges.total",
+                            "merges.total_docs",
+                            "merges.total_size",
+                            "merges.total_time",
+                            "name",
+                            "node.role",
+                            "pid",
+                            "port",
+                            "query_cache.evictions",
+                            "query_cache.hit_count",
+                            "query_cache.memory_size",
+                            "query_cache.miss_count",
+                            "ram.current",
+                            "ram.max",
+                            "ram.percent",
+                            "refresh.time",
+                            "refresh.total",
+                            "request_cache.evictions",
+                            "request_cache.hit_count",
+                            "request_cache.memory_size",
+                            "request_cache.miss_count",
+                            "script.cache_evictions",
+                            "script.compilations",
+                            "search.fetch_current",
+                            "search.fetch_time",
+                            "search.fetch_total",
+                            "search.open_contexts",
+                            "search.query_current",
+                            "search.query_time",
+                            "search.query_total",
+                            "search.scroll_current",
+                            "search.scroll_time",
+                            "search.scroll_total",
+                            "segments.count",
+                            "segments.fixed_bitset_memory",
+                            "segments.index_writer_memory",
+                            "segments.memory",
+                            "segments.version_map_memory",
+                            "shard_stats.total_count",
+                            "suggest.current",
+                            "suggest.time",
+                            "suggest.total",
+                            "uptime",
+                            "version",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "build",
+                        "completion.size",
+                        "cpu",
+                        "disk.avail",
+                        "disk.total",
+                        "disk.used",
+                        "disk.used_percent",
+                        "fielddata.evictions",
+                        "fielddata.memory_size",
+                        "file_desc.current",
+                        "file_desc.max",
+                        "file_desc.percent",
+                        "flush.total",
+                        "flush.total_time",
+                        "get.current",
+                        "get.exists_time",
+                        "get.exists_total",
+                        "get.missing_time",
+                        "get.missing_total",
+                        "get.time",
+                        "get.total",
+                        "heap.current",
+                        "heap.max",
+                        "heap.percent",
+                        "http_address",
+                        "id",
+                        "indexing.delete_current",
+                        "indexing.delete_time",
+                        "indexing.delete_total",
+                        "indexing.index_current",
+                        "indexing.index_failed",
+                        "indexing.index_failed_due_to_version_conflict",
+                        "indexing.index_time",
+                        "indexing.index_total",
+                        "ip",
+                        "jdk",
+                        "load_15m",
+                        "load_1m",
+                        "load_5m",
+                        "mappings.total_count",
+                        "mappings.total_estimated_overhead_in_bytes",
+                        "master",
+                        "merges.current",
+                        "merges.current_docs",
+                        "merges.current_size",
+                        "merges.total",
+                        "merges.total_docs",
+                        "merges.total_size",
+                        "merges.total_time",
+                        "name",
+                        "node.role",
+                        "pid",
+                        "port",
+                        "query_cache.evictions",
+                        "query_cache.hit_count",
+                        "query_cache.memory_size",
+                        "query_cache.miss_count",
+                        "ram.current",
+                        "ram.max",
+                        "ram.percent",
+                        "refresh.time",
+                        "refresh.total",
+                        "request_cache.evictions",
+                        "request_cache.hit_count",
+                        "request_cache.memory_size",
+                        "request_cache.miss_count",
+                        "script.cache_evictions",
+                        "script.compilations",
+                        "search.fetch_current",
+                        "search.fetch_time",
+                        "search.fetch_total",
+                        "search.open_contexts",
+                        "search.query_current",
+                        "search.query_time",
+                        "search.query_total",
+                        "search.scroll_current",
+                        "search.scroll_time",
+                        "search.scroll_total",
+                        "segments.count",
+                        "segments.fixed_bitset_memory",
+                        "segments.index_writer_memory",
+                        "segments.memory",
+                        "segments.version_map_memory",
+                        "shard_stats.total_count",
+                        "suggest.current",
+                        "suggest.time",
+                        "suggest.total",
+                        "uptime",
+                        "version",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         include_unloaded_segments: t.Optional[bool] = None,
@@ -1794,23 +2723,36 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-nodes.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes>`_
 
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param full_id: If `true`, return the full node ID. If `false`, return the shortened
             node ID.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param include_unloaded_segments: If true, the response includes information
             from segments that are not loaded into memory.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
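The `cat.nodes` hunk above enumerates the accepted `h` column literals and adds explicit unit parameters. A brief sketch of using them follows; the endpoint and the particular column choice are assumptions for illustration:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed endpoint

    # Select specific node columns; byte-size columns are reported as integer
    # megabytes and durations as integer seconds, per the unit semantics
    # documented in the docstring above.
    nodes = client.cat.nodes(
        format="json",
        full_id=True,
        h=["name", "node.role", "heap.percent", "ram.current", "uptime"],
        bytes="mb",
        time="s",
    )
    print(nodes)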
@@ -1858,10 +2800,25 @@ class CatClient(NamespacedClient):
     def pending_tasks(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal["insertOrder", "priority", "source", "timeInQueue"],
+                    ]
+                ],
+                t.Union[
+                    str, t.Literal["insertOrder", "priority", "source", "timeInQueue"]
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
@@ -1881,11 +2838,20 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-pending-tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -1896,12 +2862,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/pending_tasks"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -1940,10 +2913,25 @@ class CatClient(NamespacedClient):
     def plugins(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal["component", "description", "id", "name", "version"],
+                    ]
+                ],
+                t.Union[
+                    str, t.Literal["component", "description", "id", "name", "version"]
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         include_bootstrap: t.Optional[bool] = None,
@@ -1951,6 +2939,9 @@ class CatClient(NamespacedClient):
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -1961,11 +2952,20 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-plugins.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param include_bootstrap: Include bootstrap plugins in the response
@@ -1977,11 +2977,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/plugins"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2004,6 +3012,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2029,7 +3039,74 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "bytes",
+                            "bytes_percent",
+                            "bytes_recovered",
+                            "bytes_total",
+                            "files",
+                            "files_percent",
+                            "files_recovered",
+                            "files_total",
+                            "index",
+                            "repository",
+                            "shard",
+                            "snapshot",
+                            "source_host",
+                            "source_node",
+                            "stage",
+                            "start_time",
+                            "start_time_millis",
+                            "stop_time",
+                            "stop_time_millis",
+                            "target_host",
+                            "target_node",
+                            "time",
+                            "translog_ops",
+                            "translog_ops_percent",
+                            "translog_ops_recovered",
+                            "type",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "bytes",
+                        "bytes_percent",
+                        "bytes_recovered",
+                        "bytes_total",
+                        "files",
+                        "files_percent",
+                        "files_recovered",
+                        "files_total",
+                        "index",
+                        "repository",
+                        "shard",
+                        "snapshot",
+                        "source_host",
+                        "source_node",
+                        "stage",
+                        "start_time",
+                        "start_time_millis",
+                        "stop_time",
+                        "stop_time_millis",
+                        "target_host",
+                        "target_node",
+                        "time",
+                        "translog_ops",
+                        "translog_ops_percent",
+                        "translog_ops_recovered",
+                        "type",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
@@ -2049,24 +3126,37 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-recovery.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
         :param active_only: If `true`, the response only includes ongoing shard recoveries.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param detailed: If `true`, the response includes detailed information about
             shard recoveries.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
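The same unit and sorting parameters apply to `cat.recovery`, which is scoped to an index. In the sketch below, the index name `my-index` and the endpoint are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed endpoint

    # Show only ongoing recoveries for one index, with byte columns rendered
    # as integer kilobytes and durations as integer seconds, sorted by
    # recovered bytes in descending order.
    recoveries = client.cat.recovery(
        index="my-index",
        active_only=True,
        bytes="kb",
        time="s",
        h=["index", "shard", "stage", "bytes_recovered", "bytes_total"],
        s="bytes_recovered:desc",
        format="json",
    )
    print(recoveries)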
@@ -2117,6 +3207,9 @@ class CatClient(NamespacedClient):
     def repositories(
         self,
         *,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
@@ -2127,6 +3220,9 @@ class CatClient(NamespacedClient):
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -2137,8 +3233,16 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-repositories.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories>`_
 
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: List of columns to appear in the response. Supports simple wildcards.
@@ -2152,11 +3256,19 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cat/repositories"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2177,6 +3289,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2200,13 +3314,61 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "committed",
+                            "compound",
+                            "docs.count",
+                            "docs.deleted",
+                            "generation",
+                            "id",
+                            "index",
+                            "ip",
+                            "prirep",
+                            "searchable",
+                            "segment",
+                            "shard",
+                            "size",
+                            "size.memory",
+                            "version",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "committed",
+                        "compound",
+                        "docs.count",
+                        "docs.deleted",
+                        "generation",
+                        "id",
+                        "index",
+                        "ip",
+                        "prirep",
+                        "searchable",
+                        "segment",
+                        "shard",
+                        "size",
+                        "size.memory",
+                        "version",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -2218,15 +3380,23 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-segments.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -2234,9 +3404,15 @@ class CatClient(NamespacedClient):
             from the cluster state of the master node. In both cases the coordinating
             node will send requests for further information to each selected node.
         :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2269,6 +3445,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2292,7 +3470,162 @@ class CatClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "completion.size",
+                            "dataset.size",
+                            "dense_vector.value_count",
+                            "docs",
+                            "dsparse_vector.value_count",
+                            "fielddata.evictions",
+                            "fielddata.memory_size",
+                            "flush.total",
+                            "flush.total_time",
+                            "get.current",
+                            "get.exists_time",
+                            "get.exists_total",
+                            "get.missing_time",
+                            "get.missing_total",
+                            "get.time",
+                            "get.total",
+                            "id",
+                            "index",
+                            "indexing.delete_current",
+                            "indexing.delete_time",
+                            "indexing.delete_total",
+                            "indexing.index_current",
+                            "indexing.index_failed",
+                            "indexing.index_failed_due_to_version_conflict",
+                            "indexing.index_time",
+                            "indexing.index_total",
+                            "ip",
+                            "merges.current",
+                            "merges.current_docs",
+                            "merges.current_size",
+                            "merges.total",
+                            "merges.total_docs",
+                            "merges.total_size",
+                            "merges.total_time",
+                            "node",
+                            "prirep",
+                            "query_cache.evictions",
+                            "query_cache.memory_size",
+                            "recoverysource.type",
+                            "refresh.time",
+                            "refresh.total",
+                            "search.fetch_current",
+                            "search.fetch_time",
+                            "search.fetch_total",
+                            "search.open_contexts",
+                            "search.query_current",
+                            "search.query_time",
+                            "search.query_total",
+                            "search.scroll_current",
+                            "search.scroll_time",
+                            "search.scroll_total",
+                            "segments.count",
+                            "segments.fixed_bitset_memory",
+                            "segments.index_writer_memory",
+                            "segments.memory",
+                            "segments.version_map_memory",
+                            "seq_no.global_checkpoint",
+                            "seq_no.local_checkpoint",
+                            "seq_no.max",
+                            "shard",
+                            "state",
+                            "store",
+                            "suggest.current",
+                            "suggest.time",
+                            "suggest.total",
+                            "sync_id",
+                            "unassigned.at",
+                            "unassigned.details",
+                            "unassigned.for",
+                            "unassigned.reason",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "completion.size",
+                        "dataset.size",
+                        "dense_vector.value_count",
+                        "docs",
+                        "dsparse_vector.value_count",
+                        "fielddata.evictions",
+                        "fielddata.memory_size",
+                        "flush.total",
+                        "flush.total_time",
+                        "get.current",
+                        "get.exists_time",
+                        "get.exists_total",
+                        "get.missing_time",
+                        "get.missing_total",
+                        "get.time",
+                        "get.total",
+                        "id",
+                        "index",
+                        "indexing.delete_current",
+                        "indexing.delete_time",
+                        "indexing.delete_total",
+                        "indexing.index_current",
+                        "indexing.index_failed",
+                        "indexing.index_failed_due_to_version_conflict",
+                        "indexing.index_time",
+                        "indexing.index_total",
+                        "ip",
+                        "merges.current",
+                        "merges.current_docs",
+                        "merges.current_size",
+                        "merges.total",
+                        "merges.total_docs",
+                        "merges.total_size",
+                        "merges.total_time",
+                        "node",
+                        "prirep",
+                        "query_cache.evictions",
+                        "query_cache.memory_size",
+                        "recoverysource.type",
+                        "refresh.time",
+                        "refresh.total",
+                        "search.fetch_current",
+                        "search.fetch_time",
+                        "search.fetch_total",
+                        "search.open_contexts",
+                        "search.query_current",
+                        "search.query_time",
+                        "search.query_total",
+                        "search.scroll_current",
+                        "search.scroll_time",
+                        "search.scroll_total",
+                        "segments.count",
+                        "segments.fixed_bitset_memory",
+                        "segments.index_writer_memory",
+                        "segments.memory",
+                        "segments.version_map_memory",
+                        "seq_no.global_checkpoint",
+                        "seq_no.local_checkpoint",
+                        "seq_no.max",
+                        "shard",
+                        "state",
+                        "store",
+                        "suggest.current",
+                        "suggest.time",
+                        "suggest.total",
+                        "sync_id",
+                        "unassigned.at",
+                        "unassigned.details",
+                        "unassigned.for",
+                        "unassigned.reason",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
@@ -2312,22 +3645,34 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-shards.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
-        :param bytes: The unit used to display byte values.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: List of columns to appear in the response. Supports simple wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2377,10 +3722,54 @@ class CatClient(NamespacedClient):
         self,
         *,
         repository: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "duration",
+                            "end_epoch",
+                            "end_time",
+                            "failed_shards",
+                            "id",
+                            "indices",
+                            "reason",
+                            "repository",
+                            "start_epoch",
+                            "start_time",
+                            "status",
+                            "successful_shards",
+                            "total_shards",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "duration",
+                        "end_epoch",
+                        "end_time",
+                        "failed_shards",
+                        "id",
+                        "indices",
+                        "reason",
+                        "repository",
+                        "start_epoch",
+                        "start_time",
+                        "status",
+                        "successful_shards",
+                        "total_shards",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
@@ -2401,14 +3790,23 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-snapshots.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots>`_
 
         :param repository: A comma-separated list of snapshot repositories used to limit
             the request. Accepts wildcard expressions. `_all` returns all repositories.
             If any repository fails during the request, Elasticsearch returns an error.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param ignore_unavailable: If `true`, the response does not include information
@@ -2417,7 +3815,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2428,6 +3831,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/snapshots"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2468,11 +3873,59 @@ class CatClient(NamespacedClient):
         self,
         *,
         actions: t.Optional[t.Sequence[str]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         detailed: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "action",
+                            "id",
+                            "ip",
+                            "node",
+                            "node_id",
+                            "parent_task_id",
+                            "port",
+                            "running_time",
+                            "running_time_ns",
+                            "start_time",
+                            "task_id",
+                            "timestamp",
+                            "type",
+                            "version",
+                            "x_opaque_id",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "action",
+                        "id",
+                        "ip",
+                        "node",
+                        "node_id",
+                        "parent_task_id",
+                        "port",
+                        "running_time",
+                        "running_time_ns",
+                        "start_time",
+                        "task_id",
+                        "timestamp",
+                        "type",
+                        "version",
+                        "x_opaque_id",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         nodes: t.Optional[t.Sequence[str]] = None,
@@ -2494,14 +3947,23 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks>`_
 
         :param actions: The task action names, which are used to limit the response.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param detailed: If `true`, the response includes detailed information about
             shard recoveries.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param nodes: Unique node identifiers, which are used to limit the response.
@@ -2510,7 +3972,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
-        :param time: Unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         :param v: When set to `true` will enable verbose output.
@@ -2522,6 +3989,8 @@ class CatClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if actions is not None:
             __query["actions"] = actions
+        if bytes is not None:
+            __query["bytes"] = bytes
         if detailed is not None:
             __query["detailed"] = detailed
         if error_trace is not None:
@@ -2567,16 +4036,39 @@ class CatClient(NamespacedClient):
         self,
         *,
         name: t.Optional[str] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "composed_of", "index_patterns", "name", "order", "version"
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "composed_of", "index_patterns", "name", "order", "version"
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        time: t.Optional[
+            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
+        ] = None,
         v: t.Optional[bool] = None,
     ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
         """
@@ -2588,13 +4080,22 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-templates.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates>`_
 
         :param name: The name of the template to return. Accepts wildcard expressions.
             If omitted, all templates are returned.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
-        :param h: List of columns to appear in the response. Supports simple wildcards.
+        :param h: A comma-separated list of column names to display. It supports simple
+            wildcards.
         :param help: When set to `true` will output available columns. This option can't
             be combined with any other query string option.
         :param local: If `true`, the request computes the list of selected nodes from
@@ -2605,6 +4106,12 @@ class CatClient(NamespacedClient):
         :param s: List of columns that determine how the table should be sorted. Sorting
             defaults to ascending and can be changed by setting `:asc` or `:desc` as
             a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2615,6 +4122,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/templates"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2635,6 +4144,8 @@ class CatClient(NamespacedClient):
             __query["pretty"] = pretty
         if s is not None:
             __query["s"] = s
+        if time is not None:
+            __query["time"] = time
         if v is not None:
             __query["v"] = v
         __headers = {"accept": "text/plain,application/json"}
@@ -2652,10 +4163,68 @@ class CatClient(NamespacedClient):
         self,
         *,
         thread_pool_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
-        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        h: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "active",
+                            "completed",
+                            "core",
+                            "ephemeral_id",
+                            "host",
+                            "ip",
+                            "keep_alive",
+                            "largest",
+                            "max",
+                            "name",
+                            "node_id",
+                            "node_name",
+                            "pid",
+                            "pool_size",
+                            "port",
+                            "queue",
+                            "queue_size",
+                            "rejected",
+                            "size",
+                            "type",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "active",
+                        "completed",
+                        "core",
+                        "ephemeral_id",
+                        "host",
+                        "ip",
+                        "keep_alive",
+                        "largest",
+                        "max",
+                        "name",
+                        "node_id",
+                        "node_name",
+                        "pid",
+                        "pool_size",
+                        "port",
+                        "queue",
+                        "queue_size",
+                        "rejected",
+                        "size",
+                        "type",
+                    ],
+                ],
+            ]
+        ] = None,
         help: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         local: t.Optional[bool] = None,
@@ -2676,10 +4245,18 @@ class CatClient(NamespacedClient):
           IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-thread-pool.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool>`_
 
         :param thread_pool_patterns: A comma-separated list of thread pool names used
             to limit the request. Accepts wildcard expressions.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param h: List of columns to appear in the response. Supports simple wildcards.
@@ -2689,11 +4266,16 @@ class CatClient(NamespacedClient):
             the local cluster state. If `false` the list of selected nodes are computed
             from the cluster state of the master node. In both cases the coordinating
             node will send requests for further information to each selected node.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param s: List of columns that determine how the table should be sorted. Sorting
-            defaults to ascending and can be changed by setting `:asc` or `:desc` as
-            a suffix to the column name.
-        :param time: The unit used to display time values.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param s: A comma-separated list of column names or aliases that determines the
+            sort order. Sorting defaults to ascending and can be changed by setting `:asc`
+            or `:desc` as a suffix to the column name.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2704,6 +4286,8 @@ class CatClient(NamespacedClient):
             __path_parts = {}
             __path = "/_cat/thread_pool"
         __query: t.Dict[str, t.Any] = {}
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2746,6 +4330,9 @@ class CatClient(NamespacedClient):
         *,
         transform_id: t.Optional[str] = None,
         allow_no_match: t.Optional[bool] = None,
+        bytes: t.Optional[
+            t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]]
+        ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         format: t.Optional[str] = None,
@@ -2933,7 +4520,7 @@ class CatClient(NamespacedClient):
           application consumption, use the get transform statistics API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cat-transforms.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms>`_
 
         :param transform_id: A transform identifier or a wildcard expression. If you
             do not specify one of these options, the API returns information for all
@@ -2945,6 +4532,14 @@ class CatClient(NamespacedClient):
             array when there are no matches and the subset of results when there are
             partial matches. If `false`, the request returns a 404 status code when there
             are no matches or only partial matches.
+        :param bytes: Sets the units for columns that contain a byte-size value. Note
+            that byte-size value units work in terms of powers of 1024. For instance
+            `1kb` means 1024 bytes, not 1000 bytes. If omitted, byte-size values are
+            rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the
+            numeric value of the column is as small as possible whilst still being at
+            least `1.0`. If given, byte-size values are rendered as an integer with no
+            suffix, representing the value of the column in the chosen unit. Values that
+            are not an exact multiple of the chosen unit are rounded down.
         :param format: Specifies the format to return the columnar data in, can be set
             to `text`, `json`, `cbor`, `yaml`, or `smile`.
         :param from_: Skips the specified number of transforms.
@@ -2954,7 +4549,12 @@ class CatClient(NamespacedClient):
         :param s: Comma-separated list of column names or column aliases used to sort
             the response.
         :param size: The maximum number of transforms to obtain.
-        :param time: The unit used to display time values.
+        :param time: Sets the units for columns that contain a time duration. If omitted,
+            time duration values are rendered with a suffix such as `ms`, `s`, `m` or
+            `h`, chosen such that the numeric value of the column is as small as possible
+            whilst still being at least `1.0`. If given, time duration values are rendered
+            as an integer with no suffix. Values that are not an exact multiple of the
+            chosen unit are rounded down.
         :param v: When set to `true` will enable verbose output.
         """
         __path_parts: t.Dict[str, str]
@@ -2967,6 +4567,8 @@ class CatClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if allow_no_match is not None:
             __query["allow_no_match"] = allow_no_match
+        if bytes is not None:
+            __query["bytes"] = bytes
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
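
A minimal usage sketch of the cat API changes above, assuming a client already connected to a local test cluster; the endpoint, index pattern, and repository name are placeholders rather than anything taken from this diff:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical local cluster

    # `h` accepts either a comma-separated string or a sequence of the literal
    # column names shown above; `s` controls sort order, and `bytes`/`time`
    # render columns in a fixed unit instead of an auto-scaled suffix.
    shards = client.cat.shards(
        index="my-index-*",
        h=["index", "shard", "prirep", "state", "docs", "store"],
        s="store:desc",
        bytes="mb",
        time="s",
        format="json",
    )

    # cat.snapshots gained the same `bytes` parameter in this release.
    snapshots = client.cat.snapshots(repository="my-repo", bytes="gb", v=True)
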
diff -pruN 8.17.2-2/elasticsearch/_sync/client/ccr.py 9.2.0-1/elasticsearch/_sync/client/ccr.py
--- 8.17.2-2/elasticsearch/_sync/client/ccr.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/ccr.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,18 +33,23 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Delete auto-follow patterns.
-          Delete a collection of cross-cluster replication auto-follow patterns.</p>
+          <p>Delete auto-follow patterns.</p>
+          <p>Delete a collection of cross-cluster replication auto-follow patterns.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-delete-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern>`_
 
-        :param name: The name of the auto follow pattern.
+        :param name: The auto-follow pattern collection to delete.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -57,6 +62,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -72,6 +79,8 @@ class CcrClient(NamespacedClient):
     @_rewrite_parameters(
         body_fields=(
             "leader_index",
+            "remote_cluster",
+            "data_stream_name",
             "max_outstanding_read_requests",
             "max_outstanding_write_requests",
             "max_read_request_operation_count",
@@ -82,29 +91,32 @@ class CcrClient(NamespacedClient):
             "max_write_request_operation_count",
             "max_write_request_size",
             "read_poll_timeout",
-            "remote_cluster",
+            "settings",
         ),
     )
     def follow(
         self,
         *,
         index: str,
+        leader_index: t.Optional[str] = None,
+        remote_cluster: t.Optional[str] = None,
+        data_stream_name: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        leader_index: t.Optional[str] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_outstanding_read_requests: t.Optional[int] = None,
         max_outstanding_write_requests: t.Optional[int] = None,
         max_read_request_operation_count: t.Optional[int] = None,
-        max_read_request_size: t.Optional[str] = None,
+        max_read_request_size: t.Optional[t.Union[int, str]] = None,
         max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_write_buffer_count: t.Optional[int] = None,
-        max_write_buffer_size: t.Optional[str] = None,
+        max_write_buffer_size: t.Optional[t.Union[int, str]] = None,
         max_write_request_operation_count: t.Optional[int] = None,
-        max_write_request_size: t.Optional[str] = None,
+        max_write_request_size: t.Optional[t.Union[int, str]] = None,
         pretty: t.Optional[bool] = None,
         read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        remote_cluster: t.Optional[str] = None,
+        settings: t.Optional[t.Mapping[str, t.Any]] = None,
         wait_for_active_shards: t.Optional[
             t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
         ] = None,
@@ -118,28 +130,54 @@ class CcrClient(NamespacedClient):
           When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-put-follow.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow>`_
 
-        :param index: The name of the follower index
-        :param leader_index:
-        :param max_outstanding_read_requests:
-        :param max_outstanding_write_requests:
-        :param max_read_request_operation_count:
-        :param max_read_request_size:
-        :param max_retry_delay:
-        :param max_write_buffer_count:
-        :param max_write_buffer_size:
-        :param max_write_request_operation_count:
-        :param max_write_request_size:
-        :param read_poll_timeout:
-        :param remote_cluster:
-        :param wait_for_active_shards: Sets the number of shard copies that must be active
-            before returning. Defaults to 0. Set to `all` for all shard copies, otherwise
-            set to any non-negative value less than or equal to the total number of copies
-            for the shard (number of replicas + 1)
+        :param index: The name of the follower index.
+        :param leader_index: The name of the index in the leader cluster to follow.
+        :param remote_cluster: The remote cluster containing the leader index.
+        :param data_stream_name: If the leader index is part of a data stream, the name
+            to which the local data stream for the followed index should be renamed.
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param max_outstanding_read_requests: The maximum number of outstanding read
+            requests from the remote cluster.
+        :param max_outstanding_write_requests: The maximum number of outstanding write
+            requests on the follower.
+        :param max_read_request_operation_count: The maximum number of operations to
+            pull per read from the remote cluster.
+        :param max_read_request_size: The maximum size in bytes per read of a batch
+            of operations pulled from the remote cluster.
+        :param max_retry_delay: The maximum time to wait before retrying an operation
+            that failed exceptionally. An exponential backoff strategy is employed when
+            retrying.
+        :param max_write_buffer_count: The maximum number of operations that can be queued
+            for writing. When this limit is reached, reads from the remote cluster will
+            be deferred until the number of queued operations goes below the limit.
+        :param max_write_buffer_size: The maximum total bytes of operations that can
+            be queued for writing. When this limit is reached, reads from the remote
+            cluster will be deferred until the total bytes of queued operations goes
+            below the limit.
+        :param max_write_request_operation_count: The maximum number of operations per
+            bulk write request executed on the follower.
+        :param max_write_request_size: The maximum total bytes of operations per bulk
+            write request executed on the follower.
+        :param read_poll_timeout: The maximum time to wait for new operations on the
+            remote cluster when the follower index is synchronized with the leader index.
+            When the timeout has elapsed, the poll for operations will return to the
+            follower so that it can update some statistics. Then the follower will immediately
+            attempt to read from the leader again.
+        :param settings: Settings to override from the leader index.
+        :param wait_for_active_shards: Specifies the number of shards to wait on being
+            active before responding. This defaults to waiting on none of the shards
+            to be active. A shard must be restored from the leader index before being
+            active. Restoring a follower shard requires transferring all the remote Lucene
+            segment files to the follower index.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
+        if leader_index is None and body is None:
+            raise ValueError("Empty value passed for parameter 'leader_index'")
+        if remote_cluster is None and body is None:
+            raise ValueError("Empty value passed for parameter 'remote_cluster'")
         __path_parts: t.Dict[str, str] = {"index": _quote(index)}
         __path = f'/{__path_parts["index"]}/_ccr/follow'
         __query: t.Dict[str, t.Any] = {}
@@ -150,6 +188,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if wait_for_active_shards is not None:
@@ -157,6 +197,10 @@ class CcrClient(NamespacedClient):
         if not __body:
             if leader_index is not None:
                 __body["leader_index"] = leader_index
+            if remote_cluster is not None:
+                __body["remote_cluster"] = remote_cluster
+            if data_stream_name is not None:
+                __body["data_stream_name"] = data_stream_name
             if max_outstanding_read_requests is not None:
                 __body["max_outstanding_read_requests"] = max_outstanding_read_requests
             if max_outstanding_write_requests is not None:
@@ -183,8 +227,8 @@ class CcrClient(NamespacedClient):
                 __body["max_write_request_size"] = max_write_request_size
             if read_poll_timeout is not None:
                 __body["read_poll_timeout"] = read_poll_timeout
-            if remote_cluster is not None:
-                __body["remote_cluster"] = remote_cluster
+            if settings is not None:
+                __body["settings"] = settings
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
@@ -204,20 +248,24 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get follower information.
-          Get information about all cross-cluster replication follower indices.
+          <p>Get follower information.</p>
+          <p>Get information about all cross-cluster replication follower indices.
           For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-follow-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info>`_
 
-        :param index: A comma-separated list of index patterns; use `_all` to perform
-            the operation on all indices
+        :param index: A comma-delimited list of follower index patterns.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -230,6 +278,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -251,19 +301,21 @@ class CcrClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get follower stats.
-          Get cross-cluster replication follower stats.
+          <p>Get follower stats.</p>
+          <p>Get cross-cluster replication follower stats.
           The API returns shard-level stats about the &quot;following tasks&quot; associated with each shard for the specified indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-follow-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats>`_
 
-        :param index: A comma-separated list of index patterns; use `_all` to perform
-            the operation on all indices
+        :param index: A comma-delimited list of index patterns.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -278,6 +330,8 @@ class CcrClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -308,6 +362,7 @@ class CcrClient(NamespacedClient):
         human: t.Optional[bool] = None,
         leader_remote_cluster: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -325,7 +380,7 @@ class CcrClient(NamespacedClient):
           The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-forget-follower.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower>`_
 
         :param index: the name of the leader index for which specified follower retention
             leases should be removed
@@ -333,6 +388,8 @@ class CcrClient(NamespacedClient):
         :param follower_index:
         :param follower_index_uuid:
         :param leader_remote_cluster:
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -348,6 +405,8 @@ class CcrClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         if not __body:
             if follower_cluster is not None:
                 __body["follower_cluster"] = follower_cluster
@@ -376,19 +435,24 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get auto-follow patterns.
-          Get cross-cluster replication auto-follow patterns.</p>
+          <p>Get auto-follow patterns.</p>
+          <p>Get cross-cluster replication auto-follow patterns.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1>`_
 
-        :param name: Specifies the auto-follow pattern collection that you want to retrieve.
-            If you do not specify a name, the API returns information for all collections.
+        :param name: The auto-follow pattern collection that you want to retrieve. If
+            you do not specify a name, the API returns information for all collections.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -404,6 +468,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -424,13 +490,14 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Pause an auto-follow pattern.
-          Pause a cross-cluster replication auto-follow pattern.
+          <p>Pause an auto-follow pattern.</p>
+          <p>Pause a cross-cluster replication auto-follow pattern.
           When the API returns, the auto-follow pattern is inactive.
           New indices that are created on the remote cluster and match the auto-follow patterns are ignored.</p>
           <p>You can resume auto-following with the resume auto-follow pattern API.
@@ -438,10 +505,13 @@ class CcrClient(NamespacedClient):
           Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-pause-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern>`_
 
-        :param name: The name of the auto follow pattern that should pause discovering
-            new indices to follow.
+        :param name: The name of the auto-follow pattern to pause.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -454,6 +524,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -474,22 +546,26 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Pause a follower.
-          Pause a cross-cluster replication follower index.
+          <p>Pause a follower.</p>
+          <p>Pause a cross-cluster replication follower index.
           The follower index will not fetch any additional operations from the leader index.
           You can resume following with the resume follower API.
           You can pause and resume a follower index to change the configuration of the following task.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-pause-follow.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow>`_
 
-        :param index: The name of the follower index that should pause following its
-            leader index.
+        :param index: The name of the follower index.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -502,6 +578,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
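Similarly, a hedged sketch of pausing a single follower index with the newly accepted master_timeout (same hypothetical client as above):

    # The follower stops fetching operations from its leader until resumed.
    client.ccr.pause_follow(index="follower-index", master_timeout="30s")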
@@ -544,6 +622,7 @@ class CcrClient(NamespacedClient):
         human: t.Optional[bool] = None,
         leader_index_exclusion_patterns: t.Optional[t.Sequence[str]] = None,
         leader_index_patterns: t.Optional[t.Sequence[str]] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_outstanding_read_requests: t.Optional[int] = None,
         max_outstanding_write_requests: t.Optional[int] = None,
         max_read_request_operation_count: t.Optional[int] = None,
@@ -569,7 +648,7 @@ class CcrClient(NamespacedClient):
           NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-put-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern>`_
 
         :param name: The name of the collection of auto-follow patterns.
         :param remote_cluster: The remote cluster containing the leader indices to match
@@ -584,6 +663,7 @@ class CcrClient(NamespacedClient):
             or more leader_index_exclusion_patterns won’t be followed.
         :param leader_index_patterns: An array of simple index patterns to match against
             indices in the remote cluster specified by the remote_cluster field.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param max_outstanding_read_requests: The maximum number of outstanding read
             requests from the remote cluster.
         :param max_outstanding_write_requests: The maximum number of outstanding write
@@ -628,6 +708,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
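A short, assumed usage sketch for the pattern-creation call documented above; the remote cluster alias and index patterns are placeholders:

    client.ccr.put_auto_follow_pattern(
        name="logs-pattern",
        remote_cluster="leader-cluster",              # alias of a configured remote cluster
        leader_index_patterns=["logs-*"],
        leader_index_exclusion_patterns=["logs-internal-*"],
        master_timeout="30s",                         # newly accepted per this diff
    )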
@@ -688,21 +770,25 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Resume an auto-follow pattern.
-          Resume a cross-cluster replication auto-follow pattern that was paused.
+          <p>Resume an auto-follow pattern.</p>
+          <p>Resume a cross-cluster replication auto-follow pattern that was paused.
           The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.
           Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-resume-auto-follow-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern>`_
 
-        :param name: The name of the auto follow pattern to resume discovering new indices
-            to follow.
+        :param name: The name of the auto-follow pattern to resume.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -715,6 +801,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -748,6 +836,7 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         max_outstanding_read_requests: t.Optional[int] = None,
         max_outstanding_write_requests: t.Optional[int] = None,
         max_read_request_operation_count: t.Optional[int] = None,
@@ -771,9 +860,10 @@ class CcrClient(NamespacedClient):
           When this API returns, the follower index will resume fetching operations from the leader index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-resume-follow.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow>`_
 
         :param index: The name of the follow index to resume following.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param max_outstanding_read_requests:
         :param max_outstanding_write_requests:
         :param max_read_request_operation_count:
@@ -797,6 +887,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
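A hedged sketch of resuming a paused follower while re-tuning the following task; the throttle values are illustrative and mirror the keyword arguments in the signature above:

    client.ccr.resume_follow(
        index="follower-index",
        max_outstanding_read_requests=12,
        max_read_request_operation_count=5120,
        master_timeout="30s",
    )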
@@ -848,16 +940,25 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get cross-cluster replication stats.
-          This API returns stats about auto-following and the same shard-level stats as the get follower stats API.</p>
+          <p>Get cross-cluster replication stats.</p>
+          <p>This API returns stats about auto-following and the same shard-level stats as the get follower stats API.</p>
+
 
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats>`_
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-get-stats.html>`_
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ccr/stats"
@@ -868,8 +969,12 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -888,22 +993,29 @@ class CcrClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Unfollow an index.
-          Convert a cross-cluster replication follower index to a regular index.
+          <p>Unfollow an index.</p>
+          <p>Convert a cross-cluster replication follower index to a regular index.
           The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.
           The follower index must be paused and closed before you call the unfollow API.</p>
-          <p>NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.</p>
-
+          <blockquote>
+          <p>info
+          Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.</p>
+          </blockquote>
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ccr-post-unfollow.html>`_
 
-        :param index: The name of the follower index that should be turned into a regular
-            index.
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow>`_
+
+        :param index: The name of the follower index.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. It can also be set to `-1` to indicate that the
+            request should never timeout.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -916,6 +1028,8 @@ class CcrClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
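The unfollow docstring above requires the follower to be paused and closed first; a hedged end-to-end sketch with placeholder index names:

    client.ccr.pause_follow(index="follower-index")
    client.indices.close(index="follower-index")
    # Irreversible: strips the CCR metadata and turns the follower into a regular index.
    client.ccr.unfollow(index="follower-index", master_timeout="30s")
    client.indices.open(index="follower-index")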
diff -pruN 8.17.2-2/elasticsearch/_sync/client/cluster.py 9.2.0-1/elasticsearch/_sync/client/cluster.py
--- 8.17.2-2/elasticsearch/_sync/client/cluster.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/cluster.py	2025-10-28 16:50:53.000000000 +0000
@@ -38,6 +38,7 @@ class ClusterClient(NamespacedClient):
         include_disk_info: t.Optional[bool] = None,
         include_yes_decisions: t.Optional[bool] = None,
         index: t.Optional[str] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         primary: t.Optional[bool] = None,
         shard: t.Optional[int] = None,
@@ -48,23 +49,25 @@ class ClusterClient(NamespacedClient):
 
           <p>Explain the shard allocations.
           Get explanations for shard allocations in the cluster.
+          This API accepts the current_node, index, primary and shard parameters in the request body or in query parameters, but not in both at the same time.
           For unassigned shards, it provides an explanation for why the shard is unassigned.
           For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node.
-          This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.</p>
+          This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
+          Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-allocation-explain.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain>`_
 
-        :param current_node: Specifies the node ID or the name of the node to only explain
-            a shard that is currently located on the specified node.
+        :param current_node: Explain a shard only if it is currently located on the specified
+            node name or node ID.
         :param include_disk_info: If true, returns information about disk usage and shard
             sizes.
         :param include_yes_decisions: If true, returns YES decisions in explanation.
-        :param index: Specifies the name of the index that you would like an explanation
-            for.
-        :param primary: If true, returns explanation for the primary shard for the given
-            shard ID.
-        :param shard: Specifies the ID of the shard that you would like an explanation
+        :param index: The name of the index that you would like an explanation for.
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param primary: If true, returns an explanation for the primary shard for the
+            specified shard ID.
+        :param shard: An identifier for the shard that you would like an explanation
             for.
         """
         __path_parts: t.Dict[str, str] = {}
@@ -81,6 +84,8 @@ class ClusterClient(NamespacedClient):
             __query["include_disk_info"] = include_disk_info
         if include_yes_decisions is not None:
             __query["include_yes_decisions"] = include_yes_decisions
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
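For illustration, an assumed call to the allocation-explain API as documented above; the index name is a placeholder and the response is printed as-is:

    explanation = client.cluster.allocation_explain(
        index="my-index",
        shard=0,
        primary=True,
        include_disk_info=True,
        master_timeout="30s",   # newly accepted per this diff
    )
    print(explanation.body)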
@@ -126,7 +131,7 @@ class ClusterClient(NamespacedClient):
           Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Comma-separated list or wildcard expression of component template
             names used to limit the request.
@@ -170,6 +175,7 @@ class ClusterClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         wait_for_removal: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -180,8 +186,9 @@ class ClusterClient(NamespacedClient):
           Remove master-eligible nodes from the voting configuration exclusion list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/voting-config-exclusions.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions>`_
 
+        :param master_timeout: Period to wait for a connection to the master node.
         :param wait_for_removal: Specifies whether to wait for all excluded nodes to
             be removed from the cluster before clearing the voting configuration exclusions
             list. Defaults to true, meaning that all excluded nodes must be removed from
@@ -198,6 +205,8 @@ class ClusterClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if wait_for_removal is not None:
@@ -231,7 +240,7 @@ class ClusterClient(NamespacedClient):
           Returns information about whether a particular component template exists.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Comma-separated list of component template names used to limit the
             request. Wildcard (*) expressions are supported.
@@ -282,6 +291,7 @@ class ClusterClient(NamespacedClient):
         local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        settings_filter: t.Optional[t.Union[str, t.Sequence[str]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -290,7 +300,7 @@ class ClusterClient(NamespacedClient):
           Get information about component templates.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Comma-separated list of component template names used to limit the
             request. Wildcard (`*`) expressions are supported.
@@ -302,6 +312,8 @@ class ClusterClient(NamespacedClient):
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
+        :param settings_filter: Filter out results, for example to filter out sensitive
+            information. Supports wildcards or full settings keys.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -327,6 +339,8 @@ class ClusterClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if settings_filter is not None:
+            __query["settings_filter"] = settings_filter
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -353,15 +367,20 @@ class ClusterClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get cluster-wide settings.
-          By default, it returns only settings that have been explicitly defined.</p>
+          <p>Get cluster-wide settings.</p>
+          <p>By default, it returns only settings that have been explicitly defined.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings>`_
 
         :param flat_settings: If `true`, returns settings in flat format.
-        :param include_defaults: If `true`, returns default cluster settings from the
-            local node.
+        :param include_defaults: If `true`, also returns default values for all other
+            cluster settings, reflecting the values in the `elasticsearch.yml` file of
+            one of the nodes in the cluster. If the nodes in your cluster do not all
+            have the same values in their `elasticsearch.yml` config files then the values
+            returned by this API may vary from invocation to invocation and may not reflect
+            the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings`
+            API to fetch the settings for each individual node in your cluster.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
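A minimal sketch of fetching cluster settings together with node-level defaults, as described above:

    settings = client.cluster.get_settings(include_defaults=True, flat_settings=True)
    # Explicit settings come back under "persistent"/"transient"; defaults under "defaults".
    print(settings.body.get("defaults", {}).get("cluster.name"))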
@@ -433,14 +452,14 @@ class ClusterClient(NamespacedClient):
         wait_for_no_relocating_shards: t.Optional[bool] = None,
         wait_for_nodes: t.Optional[t.Union[int, str]] = None,
         wait_for_status: t.Optional[
-            t.Union[str, t.Literal["green", "red", "yellow"]]
+            t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]]
         ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get the cluster health status.
-          You can also use the API to get the health status of only specified data streams and indices.
+          <p>Get the cluster health status.</p>
+          <p>You can also use the API to get the health status of only specified data streams and indices.
           For data streams, the API retrieves the health status of the stream’s backing indices.</p>
           <p>The cluster health status is: green, yellow or red.
           On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
@@ -449,7 +468,7 @@ class ClusterClient(NamespacedClient):
           The cluster status is controlled by the worst index status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-health.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health>`_
 
         :param index: Comma-separated list of data streams, indices, and index aliases
             used to limit the request. Wildcard expressions (`*`) are supported. To target
@@ -557,7 +576,7 @@ class ClusterClient(NamespacedClient):
           Returns basic information about the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info>`_
 
         :param target: Limits the information returned to the specific target. Supports
             a comma-separated list, such as http,ingest.
@@ -606,7 +625,7 @@ class ClusterClient(NamespacedClient):
           However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-pending.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks>`_
 
         :param local: If `true`, the request retrieves information from the local node
             only. If `false`, information is retrieved from the master node.
@@ -646,6 +665,7 @@ class ClusterClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         node_ids: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         node_names: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         pretty: t.Optional[bool] = None,
@@ -671,8 +691,9 @@ class ClusterClient(NamespacedClient):
           They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/voting-config-exclusions.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions>`_
 
+        :param master_timeout: Period to wait for a connection to the master node.
         :param node_ids: A comma-separated list of the persistent ids of the nodes to
             exclude from the voting configuration. If specified, you may not also specify
             node_names.
@@ -692,6 +713,8 @@ class ClusterClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if node_ids is not None:
             __query["node_ids"] = node_ids
         if node_names is not None:
@@ -719,6 +742,7 @@ class ClusterClient(NamespacedClient):
         *,
         name: str,
         template: t.Optional[t.Mapping[str, t.Any]] = None,
+        cause: t.Optional[str] = None,
         create: t.Optional[bool] = None,
         deprecated: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
@@ -749,7 +773,7 @@ class ClusterClient(NamespacedClient):
           To be applied, a component template must be included in an index template's <code>composed_of</code> list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-component-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template>`_
 
         :param name: Name of the component template to create. Elasticsearch includes
             the following built-in component templates: `logs-mappings`; `logs-settings`;
@@ -762,6 +786,7 @@ class ClusterClient(NamespacedClient):
             update settings API.
         :param template: The template to be applied which includes mappings, settings,
             or aliases configuration.
+        :param cause: User-defined reason for creating the component template.
         :param create: If `true`, this request cannot replace or update existing component
             templates.
         :param deprecated: Marks this index template as deprecated. When creating or
@@ -786,6 +811,8 @@ class ClusterClient(NamespacedClient):
         __path = f'/_component_template/{__path_parts["name"]}'
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if cause is not None:
+            __query["cause"] = cause
         if create is not None:
             __query["create"] = create
         if error_trace is not None:
@@ -838,8 +865,8 @@ class ClusterClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Update the cluster settings.
-          Configure and update dynamic settings on a running cluster.
+          <p>Update the cluster settings.</p>
+          <p>Configure and update dynamic settings on a running cluster.
           You can also configure dynamic settings locally on an unstarted or shut down node in <code>elasticsearch.yml</code>.</p>
           <p>Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.
           You can also reset transient or persistent settings by assigning them a null value.</p>
@@ -854,13 +881,13 @@ class ClusterClient(NamespacedClient):
           If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings>`_
 
         :param flat_settings: Return settings in flat format (default: false)
         :param master_timeout: Explicit operation timeout for connection to master node
-        :param persistent:
+        :param persistent: The settings that persist after the cluster restarts.
         :param timeout: Explicit operation timeout
-        :param transient:
+        :param transient: The settings that do not persist after the cluster restarts.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_cluster/settings"
@@ -908,12 +935,19 @@ class ClusterClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get remote cluster information.
-          Get all of the configured remote cluster information.
-          This API returns connection and endpoint information keyed by the configured remote cluster alias.</p>
+          <p>Get remote cluster information.</p>
+          <p>Get information about configured remote clusters.
+          The API returns connection and endpoint information keyed by the configured remote cluster alias.</p>
+          <blockquote>
+          <p>info
+          This API returns information that reflects current state on the local cluster.
+          The <code>connected</code> field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
+          Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.
+          To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the <a href="https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster">resolve cluster endpoint</a>.</p>
+          </blockquote>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-remote-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_remote/info"
@@ -970,7 +1004,7 @@ class ClusterClient(NamespacedClient):
           <p>Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the <code>?retry_failed</code> URI query parameter, which will attempt a single retry round for these shards.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-reroute.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute>`_
 
         :param commands: Defines the commands to perform.
         :param dry_run: If true, then the request simulates the operation. It will calculate
@@ -1075,7 +1109,7 @@ class ClusterClient(NamespacedClient):
           Instead, obtain the information you require using other more stable cluster APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-state.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state>`_
 
         :param metric: Limit the information returned to the specified metrics
         :param index: A comma-separated list of index names; use `_all` or empty string
@@ -1090,7 +1124,8 @@ class ClusterClient(NamespacedClient):
             when unavailable (missing or closed)
         :param local: Return local information, do not retrieve the state from master
             node (default: false)
-        :param master_timeout: Specify timeout for connection to master
+        :param master_timeout: Timeout for waiting for new cluster state in case it is
+            blocked.
         :param wait_for_metadata_version: Wait for the metadata version to be equal or
             greater than the specified metadata version
         :param wait_for_timeout: The maximum time to wait for wait_for_metadata_version
@@ -1163,7 +1198,7 @@ class ClusterClient(NamespacedClient):
           Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats>`_
 
         :param node_id: Comma-separated list of node filters used to limit returned information.
             Defaults to all nodes in the cluster.
diff -pruN 8.17.2-2/elasticsearch/_sync/client/connector.py 9.2.0-1/elasticsearch/_sync/client/connector.py
--- 8.17.2-2/elasticsearch/_sync/client/connector.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/connector.py	2025-10-28 16:50:53.000000000 +0000
@@ -49,7 +49,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the <code>last_seen</code> field in the connector and set it to the current timestamp.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/check-in-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in>`_
 
         :param connector_id: The unique identifier of the connector to be checked in
         """
@@ -85,6 +85,7 @@ class ConnectorClient(NamespacedClient):
         delete_sync_jobs: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        hard: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -98,11 +99,12 @@ class ConnectorClient(NamespacedClient):
           These need to be removed manually.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete>`_
 
         :param connector_id: The unique identifier of the connector to be deleted
         :param delete_sync_jobs: A flag indicating if associated sync jobs should be
-            also removed. Defaults to false.
+            also removed.
+        :param hard: A flag indicating if the connector should be hard deleted.
         """
         if connector_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'connector_id'")
@@ -115,6 +117,8 @@ class ConnectorClient(NamespacedClient):
             __query["error_trace"] = error_trace
         if filter_path is not None:
             __query["filter_path"] = filter_path
+        if hard is not None:
+            __query["hard"] = hard
         if human is not None:
             __query["human"] = human
         if pretty is not None:
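A minimal sketch of the delete call with the new hard flag; the connector id is a placeholder:

    client.connector.delete(
        connector_id="my-connector",
        delete_sync_jobs=True,
        hard=True,   # new in 9.x: permanently delete instead of soft-deleting
    )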
@@ -138,6 +142,7 @@ class ConnectorClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        include_deleted: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -147,9 +152,11 @@ class ConnectorClient(NamespacedClient):
           <p>Get the details about a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get>`_
 
         :param connector_id: The unique identifier of the connector
+        :param include_deleted: A flag to indicate if the desired connector should be
+            fetched, even if it was soft-deleted.
         """
         if connector_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'connector_id'")
@@ -162,6 +169,8 @@ class ConnectorClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if include_deleted is not None:
+            __query["include_deleted"] = include_deleted
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
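And the corresponding fetch, using the new include_deleted flag to retrieve a soft-deleted connector (same placeholder id):

    connector = client.connector.get(connector_id="my-connector", include_deleted=True)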
@@ -247,7 +256,7 @@ class ConnectorClient(NamespacedClient):
           This action is used for analytics and monitoring.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-last-sync-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param last_access_control_sync_error:
@@ -333,6 +342,7 @@ class ConnectorClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         from_: t.Optional[int] = None,
         human: t.Optional[bool] = None,
+        include_deleted: t.Optional[bool] = None,
         index_name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         pretty: t.Optional[bool] = None,
         query: t.Optional[str] = None,
@@ -346,11 +356,13 @@ class ConnectorClient(NamespacedClient):
           <p>Get information about all connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list>`_
 
         :param connector_name: A comma-separated list of connector names to fetch connector
             documents for
-        :param from_: Starting offset (default: 0)
+        :param from_: Starting offset
+        :param include_deleted: A flag to indicate if soft-deleted connectors should
+            also be fetched.
         :param index_name: A comma-separated list of connector index names to fetch connector
             documents for
         :param query: A wildcard query string that filters connectors with matching name,
@@ -372,6 +384,8 @@ class ConnectorClient(NamespacedClient):
             __query["from"] = from_
         if human is not None:
             __query["human"] = human
+        if include_deleted is not None:
+            __query["include_deleted"] = include_deleted
         if index_name is not None:
             __query["index_name"] = index_name
         if pretty is not None:
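A short, assumed listing call that also surfaces soft-deleted connectors; the query string is a placeholder wildcard:

    result = client.connector.list(from_=0, include_deleted=True, query="search-*")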
@@ -427,7 +441,7 @@ class ConnectorClient(NamespacedClient):
           Self-managed connectors (Connector clients) are self-managed on your infrastructure.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put>`_
 
         :param description:
         :param index_name:
@@ -509,7 +523,7 @@ class ConnectorClient(NamespacedClient):
           <p>Create or update a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-connector-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put>`_
 
         :param connector_id: The unique identifier of the connector to be created or
             updated. ID is auto-generated if not provided.
@@ -584,7 +598,7 @@ class ConnectorClient(NamespacedClient):
           The connector service is then responsible for setting the status of connector sync jobs to cancelled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cancel-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
         """
@@ -635,7 +649,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/check-in-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
             to be checked in.
@@ -695,7 +709,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/claim-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job.
         :param worker_hostname: The host name of the current system that will run the
@@ -757,7 +771,7 @@ class ConnectorClient(NamespacedClient):
           This is a destructive action that is not recoverable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
             to be deleted
@@ -811,7 +825,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/set-connector-sync-job-error-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error>`_
 
         :param connector_sync_job_id: The unique identifier for the connector sync job.
         :param error: The error for the connector sync job error field.
@@ -865,7 +879,7 @@ class ConnectorClient(NamespacedClient):
           <p>Get a connector sync job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job
         """
@@ -938,10 +952,10 @@ class ConnectorClient(NamespacedClient):
           <p>Get information about all stored connector sync jobs listed by their creation date in ascending order.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-connector-sync-jobs-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list>`_
 
         :param connector_id: A connector id to fetch connector sync jobs for
-        :param from_: Starting offset (default: 0)
+        :param from_: Starting offset
         :param job_type: A comma-separated list of job types to fetch the sync jobs for
         :param size: Specifies a max number of results to get
         :param status: A sync job status to fetch connector sync jobs for
@@ -1004,7 +1018,7 @@ class ConnectorClient(NamespacedClient):
           <p>Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-connector-sync-job-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post>`_
 
         :param id: The id of the associated connector
         :param job_type:
@@ -1080,7 +1094,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/set-connector-sync-job-stats-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats>`_
 
         :param connector_sync_job_id: The unique identifier of the connector sync job.
         :param deleted_document_count: The number of documents the sync job deleted.
@@ -1163,7 +1177,7 @@ class ConnectorClient(NamespacedClient):
           <p>Activates the valid draft filtering for a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-filtering-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         """
@@ -1216,7 +1230,7 @@ class ConnectorClient(NamespacedClient):
           Self-managed connectors (connector clients) do not use this field.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-api-key-id-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param api_key_id:
@@ -1275,7 +1289,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the configuration field in the connector document.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-configuration-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param configuration:
@@ -1335,7 +1349,7 @@ class ConnectorClient(NamespacedClient):
           Otherwise, if the error is reset to null, the connector status is updated to connected.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-error-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param error:
@@ -1403,7 +1417,7 @@ class ConnectorClient(NamespacedClient):
           This service runs automatically on Elastic Cloud for Elastic managed connectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-features-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features>`_
 
         :param connector_id: The unique identifier of the connector to be updated.
         :param features:
@@ -1464,7 +1478,7 @@ class ConnectorClient(NamespacedClient):
           The filtering property is used to configure sync rules (both basic and advanced) for a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-filtering-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param advanced_snippet:
@@ -1525,7 +1539,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the draft filtering validation info for a connector.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-filtering-validation-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param validation:
@@ -1582,7 +1596,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the <code>index_name</code> field of a connector, specifying the index where the data ingested by the connector is stored.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-index-name-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param index_name:
@@ -1639,7 +1653,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector name and description.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-name-description-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param description:
@@ -1696,7 +1710,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector is_native flag.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-native-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param is_native:
@@ -1753,7 +1767,7 @@ class ConnectorClient(NamespacedClient):
           <p>When you create a new connector, the configuration of an ingest pipeline is populated with default settings.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param pipeline:
@@ -1809,7 +1823,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector scheduling.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-scheduling-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param scheduling:
@@ -1865,7 +1879,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector service type.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-service-type-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param service_type:
@@ -1928,7 +1942,7 @@ class ConnectorClient(NamespacedClient):
           <p>Update the connector status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-connector-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status>`_
 
         :param connector_id: The unique identifier of the connector to be updated
         :param status:
diff -pruN 8.17.2-2/elasticsearch/_sync/client/dangling_indices.py 9.2.0-1/elasticsearch/_sync/client/dangling_indices.py
--- 8.17.2-2/elasticsearch/_sync/client/dangling_indices.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/dangling_indices.py	2025-10-28 16:50:53.000000000 +0000
@@ -46,7 +46,7 @@ class DanglingIndicesClient(NamespacedCl
           For example, this can happen if you delete more than <code>cluster.indices.tombstones.size</code> indices while an Elasticsearch node is offline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-delete.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index>`_
 
         :param index_uuid: The UUID of the index to delete. Use the get dangling indices
             API to find the UUID.
@@ -107,7 +107,7 @@ class DanglingIndicesClient(NamespacedCl
           For example, this can happen if you delete more than <code>cluster.indices.tombstones.size</code> indices while an Elasticsearch node is offline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-index-import.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index>`_
 
         :param index_uuid: The UUID of the index to import. Use the get dangling indices
             API to locate the UUID.
@@ -168,7 +168,7 @@ class DanglingIndicesClient(NamespacedCl
           <p>Use this API to list dangling indices, which you can then import or delete.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/dangling-indices-list.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_dangling"
diff -pruN 8.17.2-2/elasticsearch/_sync/client/enrich.py 9.2.0-1/elasticsearch/_sync/client/enrich.py
--- 8.17.2-2/elasticsearch/_sync/client/enrich.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/enrich.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,6 +33,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -42,9 +43,10 @@ class EnrichClient(NamespacedClient):
           Deletes an existing enrich policy and its enrich index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy>`_
 
         :param name: Enrich policy to delete.
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -57,6 +59,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -77,6 +81,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         wait_for_completion: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -87,9 +92,10 @@ class EnrichClient(NamespacedClient):
           Create the enrich index for an existing enrich policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/execute-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy>`_
 
         :param name: Enrich policy to execute.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param wait_for_completion: If `true`, the request blocks other enrich policy
             execution requests until complete.
         """
@@ -104,6 +110,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if wait_for_completion is not None:
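A hedged sketch of executing an existing enrich policy with the newly accepted master_timeout; the policy name is a placeholder:

    client.enrich.execute_policy(
        name="users-policy",
        wait_for_completion=True,   # block until the enrich index is built
        master_timeout="30s",
    )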
@@ -126,6 +134,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -135,10 +144,11 @@ class EnrichClient(NamespacedClient):
           Returns information about an enrich policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy>`_
 
         :param name: Comma-separated list of enrich policy names used to limit the request.
             To return information for all enrich policies, omit this parameter.
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -154,6 +164,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -177,6 +189,7 @@ class EnrichClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         geo_match: t.Optional[t.Mapping[str, t.Any]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         match: t.Optional[t.Mapping[str, t.Any]] = None,
         pretty: t.Optional[bool] = None,
         range: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -189,11 +202,12 @@ class EnrichClient(NamespacedClient):
           Creates an enrich policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-enrich-policy-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy>`_
 
         :param name: Name of the enrich policy to create or update.
         :param geo_match: Matches enrich data to incoming documents based on a `geo_shape`
             query.
+        :param master_timeout: Period to wait for a connection to the master node.
         :param match: Matches enrich data to incoming documents based on a `term` query.
         :param range: Matches a number, date, or IP address in incoming documents to
             a range in the enrich index based on a `term` query.
@@ -210,6 +224,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -237,6 +253,7 @@ class EnrichClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -246,7 +263,9 @@ class EnrichClient(NamespacedClient):
           Returns enrich coordinator statistics and information about enrich policies that are currently executing.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/enrich-stats-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_enrich/_stats"
@@ -257,6 +276,8 @@ class EnrichClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
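The enrich endpoints above all gain an optional `master_timeout` query parameter. A minimal sketch of how a caller might pass it, assuming an `Elasticsearch` client instance named `es` and hypothetical index and policy names:

from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster, for illustration only

# Wait at most 30 seconds for the master node on each policy call.
es.enrich.put_policy(
    name="users-policy",  # hypothetical policy name
    match={
        "indices": "users",
        "match_field": "email",
        "enrich_fields": ["first_name", "last_name"],
    },
    master_timeout="30s",
)
es.enrich.execute_policy(name="users-policy", wait_for_completion=True, master_timeout="30s")
es.enrich.stats(master_timeout="30s")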
diff -pruN 8.17.2-2/elasticsearch/_sync/client/eql.py 9.2.0-1/elasticsearch/_sync/client/eql.py
--- 8.17.2-2/elasticsearch/_sync/client/eql.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/eql.py	2025-10-28 16:50:53.000000000 +0000
@@ -43,7 +43,7 @@ class EqlClient(NamespacedClient):
           The API also deletes results for the search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/eql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete>`_
 
         :param id: Identifier for the search to delete. A search ID is provided in the
             EQL search API's response for an async search. A search ID is also provided
@@ -93,7 +93,7 @@ class EqlClient(NamespacedClient):
           Get the current status and available results for an async EQL search or a stored synchronous EQL search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-eql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get>`_
 
         :param id: Identifier for the search.
         :param keep_alive: Period for which the search and its results are stored on
@@ -147,7 +147,7 @@ class EqlClient(NamespacedClient):
           Get the current status for an async EQL search or a stored synchronous EQL search without returning results.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-eql-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status>`_
 
         :param id: Identifier for the search.
         """
@@ -177,6 +177,8 @@ class EqlClient(NamespacedClient):
     @_rewrite_parameters(
         body_fields=(
             "query",
+            "allow_partial_search_results",
+            "allow_partial_sequence_results",
             "case_sensitive",
             "event_category_field",
             "fetch_size",
@@ -184,6 +186,7 @@ class EqlClient(NamespacedClient):
             "filter",
             "keep_alive",
             "keep_on_completion",
+            "max_samples_per_key",
             "result_position",
             "runtime_mappings",
             "size",
@@ -198,7 +201,10 @@ class EqlClient(NamespacedClient):
         index: t.Union[str, t.Sequence[str]],
         query: t.Optional[str] = None,
         allow_no_indices: t.Optional[bool] = None,
+        allow_partial_search_results: t.Optional[bool] = None,
+        allow_partial_sequence_results: t.Optional[bool] = None,
         case_sensitive: t.Optional[bool] = None,
+        ccs_minimize_roundtrips: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         event_category_field: t.Optional[str] = None,
         expand_wildcards: t.Optional[
@@ -221,7 +227,9 @@ class EqlClient(NamespacedClient):
         ignore_unavailable: t.Optional[bool] = None,
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         keep_on_completion: t.Optional[bool] = None,
+        max_samples_per_key: t.Optional[int] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         size: t.Optional[int] = None,
@@ -240,15 +248,29 @@ class EqlClient(NamespacedClient):
           EQL assumes each document in a data stream or index corresponds to an event.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/eql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search>`_
 
         :param index: The name of the index to scope the operation
         :param query: EQL query you wish to run.
-        :param allow_no_indices:
+        :param allow_no_indices: Whether to ignore the request if a wildcard indices
+            expression resolves to no concrete indices (this includes the `_all` string
+            or when no indices have been specified).
+        :param allow_partial_search_results: Allow query execution even in the case
+            of shard failures. If true, the query keeps running and returns results
+            based on the available shards. For sequences, the behavior can be further
+            refined using `allow_partial_sequence_results`.
+        :param allow_partial_sequence_results: This flag applies only to sequences and
+            takes effect only if `allow_partial_search_results=true`. If true, the sequence
+            query returns results based on the available shards, ignoring the others.
+            If false, the sequence query returns successfully, but always has empty
+            results.
         :param case_sensitive:
+        :param ccs_minimize_roundtrips: Indicates whether network round-trips should
+            be minimized as part of cross-cluster search request execution.
         :param event_category_field: Field containing the event classification, such
             as process, file, or network.
-        :param expand_wildcards:
+        :param expand_wildcards: Whether to expand wildcard expressions to concrete
+            indices that are open, closed, or both.
         :param fetch_size: Maximum number of events to search at a time for sequence
             queries.
         :param fields: Array of wildcard (*) patterns. The response returns values for
@@ -259,6 +281,15 @@ class EqlClient(NamespacedClient):
             in the response.
         :param keep_alive:
         :param keep_on_completion:
+        :param max_samples_per_key: By default, the response of a sample query contains
+            up to `10` samples, with one sample per unique set of join keys. Use the
+            `size` parameter to get a smaller or larger set of samples. To retrieve more
+            than one sample per set of join keys, use the `max_samples_per_key` parameter.
+            Pipes are not supported for sample queries.
+        :param project_routing: Specifies a subset of projects to target for the search
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param result_position:
         :param runtime_mappings:
         :param size: For basic queries, the maximum number of matching events to return.
@@ -278,6 +309,8 @@ class EqlClient(NamespacedClient):
         __body: t.Dict[str, t.Any] = body if body is not None else {}
         if allow_no_indices is not None:
             __query["allow_no_indices"] = allow_no_indices
+        if ccs_minimize_roundtrips is not None:
+            __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if expand_wildcards is not None:
@@ -290,9 +323,17 @@ class EqlClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if not __body:
             if query is not None:
                 __body["query"] = query
+            if allow_partial_search_results is not None:
+                __body["allow_partial_search_results"] = allow_partial_search_results
+            if allow_partial_sequence_results is not None:
+                __body["allow_partial_sequence_results"] = (
+                    allow_partial_sequence_results
+                )
             if case_sensitive is not None:
                 __body["case_sensitive"] = case_sensitive
             if event_category_field is not None:
@@ -307,6 +348,8 @@ class EqlClient(NamespacedClient):
                 __body["keep_alive"] = keep_alive
             if keep_on_completion is not None:
                 __body["keep_on_completion"] = keep_on_completion
+            if max_samples_per_key is not None:
+                __body["max_samples_per_key"] = max_samples_per_key
             if result_position is not None:
                 __body["result_position"] = result_position
             if runtime_mappings is not None:
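The EQL search signature above adds `allow_partial_search_results`, `allow_partial_sequence_results`, `max_samples_per_key`, `ccs_minimize_roundtrips`, and `project_routing`. A hedged usage sketch, assuming an existing client `es` and a hypothetical `logs-*` index pattern:

resp = es.eql.search(
    index="logs-*",  # hypothetical index pattern
    query='process where process.name == "regsvr32.exe"',
    allow_partial_search_results=True,     # keep running on the shards that are available
    allow_partial_sequence_results=False,  # sequences stay strict even when partial results are allowed
    max_samples_per_key=3,                 # only relevant for sample queries
    ccs_minimize_roundtrips=True,          # sent as a query-string parameter per the diff
)
print(resp["hits"]["total"])

Note that the partial-results flags and `max_samples_per_key` travel in the request body, while `ccs_minimize_roundtrips` and `project_routing` are query-string parameters, mirroring the branches added in the body of `search`.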
diff -pruN 8.17.2-2/elasticsearch/_sync/client/esql.py 9.2.0-1/elasticsearch/_sync/client/esql.py
--- 8.17.2-2/elasticsearch/_sync/client/esql.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/esql.py	2025-10-28 16:50:53.000000000 +0000
@@ -20,7 +20,16 @@ import typing as t
 from elastic_transport import ObjectApiResponse
 
 from ._base import NamespacedClient
-from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters
+from .utils import (
+    SKIP_IN_PATH,
+    Stability,
+    _quote,
+    _rewrite_parameters,
+    _stability_warning,
+)
+
+if t.TYPE_CHECKING:
+    from elasticsearch.esql import ESQLBase
 
 
 class EsqlClient(NamespacedClient):
@@ -31,17 +40,22 @@ class EsqlClient(NamespacedClient):
             "columnar",
             "filter",
             "include_ccs_metadata",
+            "include_execution_metadata",
+            "keep_alive",
+            "keep_on_completion",
             "locale",
             "params",
             "profile",
             "tables",
+            "wait_for_completion_timeout",
         ),
         ignore_deprecated_options={"params"},
     )
     def async_query(
         self,
         *,
-        query: t.Optional[str] = None,
+        query: t.Optional[t.Union[str, "ESQLBase"]] = None,
+        allow_partial_results: t.Optional[bool] = None,
         columnar: t.Optional[bool] = None,
         delimiter: t.Optional[str] = None,
         drop_null_columns: t.Optional[bool] = None,
@@ -58,12 +72,11 @@ class EsqlClient(NamespacedClient):
         ] = None,
         human: t.Optional[bool] = None,
         include_ccs_metadata: t.Optional[bool] = None,
+        include_execution_metadata: t.Optional[bool] = None,
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         keep_on_completion: t.Optional[bool] = None,
         locale: t.Optional[str] = None,
-        params: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
-        ] = None,
+        params: t.Optional[t.Sequence[t.Union[None, bool, float, int, str]]] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
         tables: t.Optional[
@@ -82,10 +95,15 @@ class EsqlClient(NamespacedClient):
           <p>The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-async-query-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query>`_
 
         :param query: The ES|QL query API accepts an ES|QL query string in the query
             parameter, runs it, and returns the results.
+        :param allow_partial_results: If `true`, partial results will be returned if
+            there are shard failures, but the query can continue to execute on other
+            clusters and shards. If `false`, the query will fail if there are any failures.
+            To override the default behavior, you can set the `esql.query.allow_partial_results`
+            cluster setting to `false`.
         :param columnar: By default, ES|QL returns results as rows. For example, FROM
             returns each individual document as one row. For the JSON, YAML, CBOR and
             smile formats, ES|QL can return the results in a columnar fashion where one
@@ -98,8 +116,17 @@ class EsqlClient(NamespacedClient):
             which has the name of all the columns.
         :param filter: Specify a Query DSL query in the filter parameter to filter the
             set of documents that an ES|QL query runs on.
-        :param format: A short version of the Accept header, for example `json` or `yaml`.
-        :param include_ccs_metadata: When set to `true` and performing a cross-cluster
+        :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+            `tsv`, and `txt` formats will return results in a tabular format, excluding
+            other metadata fields from the response. For async requests, nothing will
+            be returned if the async query doesn't finish within the timeout. The query
+            ID and running status are available in the `X-Elasticsearch-Async-Id` and
+            `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
+        :param include_ccs_metadata: When set to `true` and performing a cross-cluster/cross-project
+            query, the response will include an extra `_clusters` object with information
+            about the clusters that participated in the search along with info such as
+            shards count.
+        :param include_execution_metadata: When set to `true` and performing a cross-cluster/cross-project
             query, the response will include an extra `_clusters` object with information
             about the clusters that participated in the search along with info such as
             shards count.
@@ -134,6 +161,8 @@ class EsqlClient(NamespacedClient):
         __path = "/_query/async"
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if allow_partial_results is not None:
+            __query["allow_partial_results"] = allow_partial_results
         if delimiter is not None:
             __query["delimiter"] = delimiter
         if drop_null_columns is not None:
@@ -146,23 +175,23 @@ class EsqlClient(NamespacedClient):
             __query["format"] = format
         if human is not None:
             __query["human"] = human
-        if keep_alive is not None:
-            __query["keep_alive"] = keep_alive
-        if keep_on_completion is not None:
-            __query["keep_on_completion"] = keep_on_completion
         if pretty is not None:
             __query["pretty"] = pretty
-        if wait_for_completion_timeout is not None:
-            __query["wait_for_completion_timeout"] = wait_for_completion_timeout
         if not __body:
             if query is not None:
-                __body["query"] = query
+                __body["query"] = str(query)
             if columnar is not None:
                 __body["columnar"] = columnar
             if filter is not None:
                 __body["filter"] = filter
             if include_ccs_metadata is not None:
                 __body["include_ccs_metadata"] = include_ccs_metadata
+            if include_execution_metadata is not None:
+                __body["include_execution_metadata"] = include_execution_metadata
+            if keep_alive is not None:
+                __body["keep_alive"] = keep_alive
+            if keep_on_completion is not None:
+                __body["keep_on_completion"] = keep_on_completion
             if locale is not None:
                 __body["locale"] = locale
             if params is not None:
@@ -171,6 +200,8 @@ class EsqlClient(NamespacedClient):
                 __body["profile"] = profile
             if tables is not None:
                 __body["tables"] = tables
+            if wait_for_completion_timeout is not None:
+                __body["wait_for_completion_timeout"] = wait_for_completion_timeout
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -205,7 +236,7 @@ class EsqlClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-async-query-delete-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete>`_
 
         :param id: The unique identifier of the query. A query ID is provided in the
             ES|QL async query API response for a query that does not complete in the
@@ -243,6 +274,14 @@ class EsqlClient(NamespacedClient):
         drop_null_columns: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        format: t.Optional[
+            t.Union[
+                str,
+                t.Literal[
+                    "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml"
+                ],
+            ]
+        ] = None,
         human: t.Optional[bool] = None,
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
@@ -258,7 +297,7 @@ class EsqlClient(NamespacedClient):
           If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-async-query-get-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get>`_
 
         :param id: The unique identifier of the query. A query ID is provided in the
             ES|QL async query API response for a query that does not complete in the
@@ -268,6 +307,7 @@ class EsqlClient(NamespacedClient):
             will be removed from the `columns` and `values` portion of the results. If
             `true`, the response will include an extra section under the name `all_columns`
             which has the name of all the columns.
+        :param format: A short version of the Accept header, for example `json` or `yaml`.
         :param keep_alive: The period for which the query and its results are stored
             in the cluster. When this period expires, the query and its results are deleted,
             even if the query is still ongoing.
@@ -288,6 +328,8 @@ class EsqlClient(NamespacedClient):
             __query["error_trace"] = error_trace
         if filter_path is not None:
             __query["filter_path"] = filter_path
+        if format is not None:
+            __query["format"] = format
         if human is not None:
             __query["human"] = human
         if keep_alive is not None:
@@ -306,12 +348,153 @@ class EsqlClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters()
+    def async_query_stop(
+        self,
+        *,
+        id: str,
+        drop_null_columns: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Stop an async ES|QL query.</p>
+          <p>This API interrupts the query execution and returns the results so far.
+          If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop>`_
+
+        :param id: The unique identifier of the query. A query ID is provided in the
+            ES|QL async query API response for a query that does not complete in the
+            designated time. A query ID is also provided when the request was submitted
+            with the `keep_on_completion` parameter set to `true`.
+        :param drop_null_columns: Indicates whether columns that are entirely `null`
+            will be removed from the `columns` and `values` portion of the results. If
+            `true`, the response will include an extra section under the name `all_columns`
+            which has the name of all the columns.
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'id'")
+        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
+        __path = f'/_query/async/{__path_parts["id"]}/stop'
+        __query: t.Dict[str, t.Any] = {}
+        if drop_null_columns is not None:
+            __query["drop_null_columns"] = drop_null_columns
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="esql.async_query_stop",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def get_query(
+        self,
+        *,
+        id: str,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get information about a specific running ES|QL query.
+          Returns an object with extended information about the running query.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-get-query>`_
+
+        :param id: The query ID
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'id'")
+        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
+        __path = f'/_query/queries/{__path_parts["id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="esql.get_query",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def list_queries(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get information about running ES|QL queries.
+          Returns an object containing IDs and other information about the currently running ES|QL queries.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-list-queries>`_
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_query/queries"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="esql.list_queries",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         body_fields=(
             "query",
             "columnar",
             "filter",
             "include_ccs_metadata",
+            "include_execution_metadata",
             "locale",
             "params",
             "profile",
@@ -322,7 +505,8 @@ class EsqlClient(NamespacedClient):
     def query(
         self,
         *,
-        query: t.Optional[str] = None,
+        query: t.Optional[t.Union[str, "ESQLBase"]] = None,
+        allow_partial_results: t.Optional[bool] = None,
         columnar: t.Optional[bool] = None,
         delimiter: t.Optional[str] = None,
         drop_null_columns: t.Optional[bool] = None,
@@ -339,9 +523,15 @@ class EsqlClient(NamespacedClient):
         ] = None,
         human: t.Optional[bool] = None,
         include_ccs_metadata: t.Optional[bool] = None,
+        include_execution_metadata: t.Optional[bool] = None,
         locale: t.Optional[str] = None,
         params: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[
+                t.Union[
+                    t.Sequence[t.Union[None, bool, float, int, str]],
+                    t.Union[None, bool, float, int, str],
+                ]
+            ]
         ] = None,
         pretty: t.Optional[bool] = None,
         profile: t.Optional[bool] = None,
@@ -357,10 +547,15 @@ class EsqlClient(NamespacedClient):
           Get search results for an ES|QL (Elasticsearch query language) query.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/esql-rest.html>`_
+        `<https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest>`_
 
         :param query: The ES|QL query API accepts an ES|QL query string in the query
             parameter, runs it, and returns the results.
+        :param allow_partial_results: If `true`, partial results will be returned if
+            there are shard failures, but the query can continue to execute on other
+            clusters and shards. If `false`, the query will fail if there are any failures.
+            To override the default behavior, you can set the `esql.query.allow_partial_results`
+            cluster setting to `false`.
         :param columnar: By default, ES|QL returns results as rows. For example, FROM
             returns each individual document as one row. For the JSON, YAML, CBOR and
             smile formats, ES|QL can return the results in a columnar fashion where one
@@ -373,8 +568,14 @@ class EsqlClient(NamespacedClient):
             `all_columns` which has the name of all columns.
         :param filter: Specify a Query DSL query in the filter parameter to filter the
             set of documents that an ES|QL query runs on.
-        :param format: A short version of the Accept header, e.g. json, yaml.
-        :param include_ccs_metadata: When set to `true` and performing a cross-cluster
+        :param format: A short version of the Accept header, e.g. json, yaml. `csv`,
+            `tsv`, and `txt` formats will return results in a tabular format, excluding
+            other metadata fields from the response.
+        :param include_ccs_metadata: When set to `true` and performing a cross-cluster/cross-project
+            query, the response will include an extra `_clusters` object with information
+            about the clusters that participated in the search along with info such as
+            shards count.
+        :param include_execution_metadata: When set to `true` and performing a cross-cluster/cross-project
             query, the response will include an extra `_clusters` object with information
             about the clusters that participated in the search along with info such as
             shards count.
@@ -395,6 +596,8 @@ class EsqlClient(NamespacedClient):
         __path = "/_query"
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if allow_partial_results is not None:
+            __query["allow_partial_results"] = allow_partial_results
         if delimiter is not None:
             __query["delimiter"] = delimiter
         if drop_null_columns is not None:
@@ -411,13 +614,15 @@ class EsqlClient(NamespacedClient):
             __query["pretty"] = pretty
         if not __body:
             if query is not None:
-                __body["query"] = query
+                __body["query"] = str(query)
             if columnar is not None:
                 __body["columnar"] = columnar
             if filter is not None:
                 __body["filter"] = filter
             if include_ccs_metadata is not None:
                 __body["include_ccs_metadata"] = include_ccs_metadata
+            if include_execution_metadata is not None:
+                __body["include_execution_metadata"] = include_execution_metadata
             if locale is not None:
                 __body["locale"] = locale
             if params is not None:
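Taken together, the ES|QL changes move `keep_alive`, `keep_on_completion`, and `wait_for_completion_timeout` into the request body, let `query` accept an `ESQLBase` builder object (it is converted with `str()`), and add the `async_query_stop`, `get_query`, and `list_queries` endpoints. A hedged end-to-end sketch, assuming a client `es` and a hypothetical `logs-*` data stream:

# Submit an async ES|QL query; keep_alive and friends are now body fields.
resp = es.esql.async_query(
    query="FROM logs-* | STATS count = COUNT(*) BY host.name",
    wait_for_completion_timeout="2s",
    keep_on_completion=True,
    keep_alive="5m",
)
query_id = resp.body.get("id")  # .body is the plain dict view; "id" is present when the query is still running

# The new (experimental) endpoints expose running queries.
running = es.esql.list_queries()

if query_id:
    # Interrupt the query and take whatever results are available so far.
    partial = es.esql.async_query_stop(id=query_id)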
diff -pruN 8.17.2-2/elasticsearch/_sync/client/features.py 9.2.0-1/elasticsearch/_sync/client/features.py
--- 8.17.2-2/elasticsearch/_sync/client/features.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/features.py	2025-10-28 16:50:53.000000000 +0000
@@ -32,6 +32,7 @@ class FeaturesClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -47,7 +48,9 @@ class FeaturesClient(NamespacedClient):
           In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-features-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_features"
@@ -58,6 +61,8 @@ class FeaturesClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -78,6 +83,7 @@ class FeaturesClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -96,7 +102,9 @@ class FeaturesClient(NamespacedClient):
           <p>IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_features/_reset"
@@ -107,6 +115,8 @@ class FeaturesClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
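Both features endpoints also pick up `master_timeout`. A short sketch (the `"30s"` value is arbitrary; resetting feature state is destructive, so it is shown commented out):

features = es.features.get_features(master_timeout="30s")
print([f["name"] for f in features["features"]])
# es.features.reset_features(master_timeout="30s")  # destructive: clears feature state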
diff -pruN 8.17.2-2/elasticsearch/_sync/client/fleet.py 9.2.0-1/elasticsearch/_sync/client/fleet.py
--- 8.17.2-2/elasticsearch/_sync/client/fleet.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/fleet.py	2025-10-28 16:50:53.000000000 +0000
@@ -48,10 +48,12 @@ class FleetClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project.</p>
+          <p>Get global checkpoints.</p>
+          <p>Get the current global checkpoints for an index.
+          This API is designed for internal use by the Fleet server project.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-global-checkpoints.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet>`_
 
         :param index: A single index or index alias that resolves to a single index.
         :param checkpoints: A comma separated list of previous global checkpoints. When
@@ -136,11 +138,14 @@ class FleetClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Executes several <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html">fleet searches</a> with a single API request.
-          The API follows the same structure as the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html">multi search</a> API. However, similar to the fleet search API, it
-          supports the wait_for_checkpoints parameter.</p>
+          <p>Run multiple Fleet searches.
+          Run several Fleet searches with a single API request.
+          The API follows the same structure as the multi search API.
+          However, similar to the Fleet search API, it supports the <code>wait_for_checkpoints</code> parameter.</p>
 
 
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch>`_
+
         :param searches:
         :param index: A single target to search. If the target is an index alias, it
             must resolve to a single index.
@@ -150,9 +155,9 @@ class FleetClient(NamespacedClient):
             example, a request targeting foo*,bar* returns an error if an index starts
             with foo but no index starts with bar.
         :param allow_partial_search_results: If true, returns partial results if there
-            are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
-            If false, returns an error with no partial results. Defaults to the configured
-            cluster setting `search.default_allow_partial_results` which is true by default.
+            are shard request timeouts or shard failures. If false, returns an error
+            with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+            which is true by default.
         :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating
             node and remote clusters are minimized for cross-cluster search requests.
         :param expand_wildcards: Type of index that wildcard expressions can match. If
@@ -326,7 +331,6 @@ class FleetClient(NamespacedClient):
         indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None,
         lenient: t.Optional[bool] = None,
         max_concurrent_shard_requests: t.Optional[int] = None,
-        min_compatible_shard_node: t.Optional[str] = None,
         min_score: t.Optional[float] = None,
         pit: t.Optional[t.Mapping[str, t.Any]] = None,
         post_filter: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -346,7 +350,7 @@ class FleetClient(NamespacedClient):
         script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
         scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         search_type: t.Optional[
             t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]
@@ -384,9 +388,12 @@ class FleetClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>The purpose of the fleet search api is to provide a search api where the search will only be executed
-          after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.</p>
+          <p>Run a Fleet search.
+          The purpose of the Fleet search API is to provide an API where the search will be run only
+          after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.</p>
+
 
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search>`_
 
         :param index: A single target to search. If the target is an index alias, it
             must resolve to a single index.
@@ -394,9 +401,9 @@ class FleetClient(NamespacedClient):
         :param aggs:
         :param allow_no_indices:
         :param allow_partial_search_results: If true, returns partial results if there
-            are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
-            If false, returns an error with no partial results. Defaults to the configured
-            cluster setting `search.default_allow_partial_results` which is true by default.
+            are shard request timeouts or shard failures. If false, returns an error
+            with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`,
+            which is true by default.
         :param analyze_wildcard:
         :param analyzer:
         :param batched_reduce_size:
@@ -422,9 +429,8 @@ class FleetClient(NamespacedClient):
         :param indices_boost: Boosts the _score of documents from specified indices.
         :param lenient:
         :param max_concurrent_shard_requests:
-        :param min_compatible_shard_node:
         :param min_score: Minimum _score for matching documents. Documents with a lower
-            _score are not included in the search results.
+            _score are not included in search results and results collected by aggregations.
         :param pit: Limits the search to a point in time (PIT). If you provide a PIT,
             you cannot specify an <index> in the request path.
         :param post_filter:
@@ -537,8 +543,6 @@ class FleetClient(NamespacedClient):
             __query["lenient"] = lenient
         if max_concurrent_shard_requests is not None:
             __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests
-        if min_compatible_shard_node is not None:
-            __query["min_compatible_shard_node"] = min_compatible_shard_node
         if pre_filter_shard_size is not None:
             __query["pre_filter_shard_size"] = pre_filter_shard_size
         if preference is not None:
@@ -638,11 +642,7 @@ class FleetClient(NamespacedClient):
                 __body["track_total_hits"] = track_total_hits
             if version is not None:
                 __body["version"] = version
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
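The Fleet search path now always sends a JSON body (the conditional content-type header was dropped) and `min_compatible_shard_node` is gone. A hedged sketch of a Fleet-style search, assuming the client `es`, a hypothetical index name, and that `wait_for_checkpoints` is accepted as described for the Fleet search API:

resp = es.fleet.search(
    index=".fleet-agents",               # must resolve to a single index
    query={"match_all": {}},
    wait_for_checkpoints=[1],            # only run the search once this checkpoint is visible
    allow_partial_search_results=False,  # fail instead of returning partial results
)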
diff -pruN 8.17.2-2/elasticsearch/_sync/client/graph.py 9.2.0-1/elasticsearch/_sync/client/graph.py
--- 8.17.2-2/elasticsearch/_sync/client/graph.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/graph.py	2025-10-28 16:50:53.000000000 +0000
@@ -55,7 +55,7 @@ class GraphClient(NamespacedClient):
           You can exclude vertices that have already been returned.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/graph-explore-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph>`_
 
         :param index: Name of the index.
         :param connections: Specifies one or more fields from which you want to extract terms
@@ -97,11 +97,7 @@ class GraphClient(NamespacedClient):
                 __body["query"] = query
             if vertices is not None:
                 __body["vertices"] = vertices
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
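Like the Fleet search, `graph.explore` now always sends a request body with an explicit content-type. A minimal sketch with made-up index and field names:

resp = es.graph.explore(
    index="clicklogs",
    query={"match": {"query.raw": "midi"}},
    vertices=[{"field": "product"}],
    connections={"vertices": [{"field": "query.raw"}]},
)
for vertex in resp["vertices"]:
    print(vertex["field"], vertex["term"])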
diff -pruN 8.17.2-2/elasticsearch/_sync/client/ilm.py 9.2.0-1/elasticsearch/_sync/client/ilm.py
--- 8.17.2-2/elasticsearch/_sync/client/ilm.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/ilm.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class IlmClient(NamespacedClient):
           You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-delete-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle>`_
 
         :param name: Identifier for the policy.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -92,7 +92,6 @@ class IlmClient(NamespacedClient):
         only_errors: t.Optional[bool] = None,
         only_managed: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -103,7 +102,7 @@ class IlmClient(NamespacedClient):
           <p>The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-explain-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases to target.
             Supports wildcards (`*`). To target all data streams and indices, use `*`
@@ -116,8 +115,6 @@ class IlmClient(NamespacedClient):
             while executing the policy, or attempting to use a policy that does not exist.
         :param only_managed: Filters the returned indices to only indices that are managed
             by ILM.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -138,8 +135,6 @@ class IlmClient(NamespacedClient):
             __query["only_managed"] = only_managed
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -168,7 +163,7 @@ class IlmClient(NamespacedClient):
           <p>Get lifecycle policies.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-get-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle>`_
 
         :param name: Identifier for the policy.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -219,11 +214,11 @@ class IlmClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get the ILM status.
-          Get the current index lifecycle management status.</p>
+          <p>Get the ILM status.</p>
+          <p>Get the current index lifecycle management status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-get-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ilm/status"
@@ -257,6 +252,7 @@ class IlmClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         legacy_template_to_delete: t.Optional[str] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         node_attribute: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
@@ -279,12 +275,16 @@ class IlmClient(NamespacedClient):
           Use the stop ILM and get ILM status APIs to wait until the reported operation mode is <code>STOPPED</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-migrate-to-data-tiers.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers>`_
 
         :param dry_run: If true, simulates the migration from node attributes based allocation
             filters to data tiers, but does not perform the migration. This provides
             a way to retrieve the indices and ILM policies that need to be migrated.
         :param legacy_template_to_delete:
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error. It can also be set to `-1` to indicate that the request
+            should never timeout.
         :param node_attribute:
         """
         __path_parts: t.Dict[str, str] = {}
@@ -299,6 +299,8 @@ class IlmClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
@@ -352,7 +354,7 @@ class IlmClient(NamespacedClient):
           An index cannot move to a step that is not part of its policy.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-move-to-step.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step>`_
 
         :param index: The name of the index whose lifecycle step is to change
         :param current_step: The step that the index is expected to be in.
@@ -381,11 +383,7 @@ class IlmClient(NamespacedClient):
                 __body["current_step"] = current_step
             if next_step is not None:
                 __body["next_step"] = next_step
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -420,7 +418,7 @@ class IlmClient(NamespacedClient):
           <p>NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-put-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle>`_
 
         :param name: Identifier for the policy.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -451,11 +449,7 @@ class IlmClient(NamespacedClient):
         if not __body:
             if policy is not None:
                 __body["policy"] = policy
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -484,7 +478,7 @@ class IlmClient(NamespacedClient):
           It also stops managing the indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-remove-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy>`_
 
         :param index: The name of the index to remove policy on
         """
@@ -530,7 +524,7 @@ class IlmClient(NamespacedClient):
           Use the explain lifecycle state API to determine whether an index is in the ERROR step.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-retry-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry>`_
 
         :param index: The name of the indices (comma-separated) whose failed lifecycle
             step is to be retried
@@ -578,10 +572,13 @@ class IlmClient(NamespacedClient):
           Restarting ILM is necessary only when it has been stopped using the stop ILM API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-start.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start>`_
 
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ilm/start"
@@ -629,10 +626,13 @@ class IlmClient(NamespacedClient):
           Use the get ILM status API to check whether ILM is running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ilm-stop.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop>`_
 
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ilm/stop"
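The ILM changes are mostly documentation, plus the removal of the unused `timeout` parameter from `explain_lifecycle` and a new `master_timeout` on `migrate_to_data_tiers`. A hedged sketch of stopping and restarting ILM with explicit timeouts (values are arbitrary):

# Stop ILM, confirm it has stopped, then start it again.
es.ilm.stop(master_timeout="30s", timeout="30s")
status = es.ilm.get_status()
if status["operation_mode"] == "STOPPED":
    es.ilm.start(master_timeout="30s", timeout="30s")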
diff -pruN 8.17.2-2/elasticsearch/_sync/client/indices.py 9.2.0-1/elasticsearch/_sync/client/indices.py
--- 8.17.2-2/elasticsearch/_sync/client/indices.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/indices.py	2025-10-28 16:50:53.000000000 +0000
@@ -57,23 +57,40 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Add an index block.
-          Limits the operations allowed on an index by blocking specific operation types.</p>
+          <p>Add an index block.</p>
+          <p>Add an index block to an index.
+          Index blocks limit the operations allowed on an index by blocking specific operation types.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/index-modules-blocks.html>`_
-
-        :param index: A comma separated list of indices to add a block to
-        :param block: The block to add (one of read, write, read_only or metadata)
-        :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
-            into no concrete indices. (This includes `_all` string or when no indices
-            have been specified)
-        :param expand_wildcards: Whether to expand wildcard expression to concrete indices
-            that are open, closed or both.
-        :param ignore_unavailable: Whether specified concrete indices should be ignored
-            when unavailable (missing or closed)
-        :param master_timeout: Specify timeout for connection to master
-        :param timeout: Explicit operation timeout
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block>`_
+
+        :param index: A comma-separated list or wildcard expression of index names used
+            to limit the request. By default, you must explicitly name the indices you
+            are adding blocks to. To allow the adding of blocks to indices with `_all`,
+            `*`, or other wildcard expressions, change the `action.destructive_requires_name`
+            setting to `false`. You can update this setting in the `elasticsearch.yml`
+            file or by using the cluster update settings API.
+        :param block: The block type to add to the index.
+        :param allow_no_indices: If `false`, the request returns an error if any wildcard
+            expression, index alias, or `_all` value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices. For
+            example, a request targeting `foo*,bar*` returns an error if an index starts
+            with `foo` but no index starts with `bar`.
+        :param expand_wildcards: The type of index that wildcard patterns can match.
+            If the request can target data streams, this argument determines whether
+            wildcard expressions match hidden data streams. It supports comma-separated
+            values, such as `open,hidden`.
+        :param ignore_unavailable: If `false`, the request returns an error if it targets
+            a missing or closed index.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. It can also be set to `-1` to indicate that the request should
+            never timeout.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. It can
+            also be set to `-1` to indicate that the request should never timeout.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
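
A brief sketch of the add_block call described above, assuming an existing index named "my-index"; "write" is one of the documented block types (read, write, read_only, metadata):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Block document writes on "my-index"; reads and metadata changes remain allowed.
    client.indices.add_block(index="my-index", block="write", timeout="30s")
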
@@ -156,7 +173,7 @@ class IndicesClient(NamespacedClient):
           The <code>_analyze</code> endpoint without a specified index will always use <code>10000</code> as its limit.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-analyze.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze>`_
 
         :param index: Index used to derive the analyzer. If specified, the `analyzer`
             or field parameter overrides this value. If no index is specified or the
@@ -215,11 +232,7 @@ class IndicesClient(NamespacedClient):
                 __body["text"] = text
             if tokenizer is not None:
                 __body["tokenizer"] = tokenizer
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -231,6 +244,51 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def cancel_migrate_reindex(
+        self,
+        *,
+        index: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Cancel a migration reindex operation.</p>
+          <p>Cancel a migration reindex attempt for a data stream or index.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex>`_
+
+        :param index: The index or data stream name
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'index'")
+        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+        __path = f'/_migration/reindex/{__path_parts["index"]}/_cancel'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.cancel_migrate_reindex",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
     def clear_cache(
         self,
         *,
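
A usage sketch for the new experimental cancel_migrate_reindex method added above, assuming a data stream named "my-data-stream" with a migration reindex in progress:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Cancel the in-flight migration reindex for the named data stream or index.
    client.indices.cancel_migrate_reindex(index="my-data-stream")
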
@@ -265,7 +323,7 @@ class IndicesClient(NamespacedClient):
           To clear the cache only of specific fields, use the <code>fields</code> parameter.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clearcache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -276,7 +334,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter
             to clear the cache of specific fields only.
         :param fields: Comma-separated list of field names used to limit the `fielddata`
@@ -387,7 +445,7 @@ class IndicesClient(NamespacedClient):
           <p>Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-clone-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone>`_
 
         :param index: Name of the source index to clone.
         :param target: Name of the target index to create.
@@ -491,7 +549,7 @@ class IndicesClient(NamespacedClient):
           Closing indices can be turned off with the cluster settings API by setting <code>cluster.indices.close.enable</code> to <code>false</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-close.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close>`_
 
         :param index: Comma-separated list or wildcard expression of index names used
             to limit the request.
@@ -501,7 +559,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -592,9 +650,17 @@ class IndicesClient(NamespacedClient):
           Note that changing this setting will also affect the <code>wait_for_active_shards</code> value on all subsequent write operations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-create-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create>`_
 
-        :param index: Name of the index you wish to create.
+        :param index: Name of the index you wish to create. Index names must meet the
+            following criteria: * Lowercase only * Cannot include `\\`, `/`, `*`, `?`,
+            `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to
+            7.0 could contain a colon (`:`), but that has been deprecated and will not
+            be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot
+            be `.` or `..` * Cannot be longer than 255 bytes (note that it is bytes,
+            so multi-byte characters will reach the limit faster) * Names starting with
+            `.` are deprecated, except for hidden indices and internal indices managed
+            by plugins
         :param aliases: Aliases for the index.
         :param mappings: Mapping for fields in the index. If specified, this mapping
             can include: - Field names - Field data types - Mapping parameters
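
A short sketch of an index creation call that satisfies the naming criteria listed above; the index name and mapping are illustrative only:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Lowercase, no forbidden characters, does not start with -, _ or +, well under 255 bytes.
    client.indices.create(
        index="logs-web-2025.01",
        mappings={"properties": {"message": {"type": "text"}}},
    )
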
@@ -665,12 +731,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create a data stream.
-          Creates a data stream.
-          You must have a matching index template with data stream enabled.</p>
+          <p>Create a data stream.</p>
+          <p>You must have a matching index template with data stream enabled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream>`_
 
         :param name: Name of the data stream, which must meet the following criteria:
             Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`,
@@ -710,6 +775,71 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters(
+        body_name="create_from",
+    )
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def create_from(
+        self,
+        *,
+        source: str,
+        dest: str,
+        create_from: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an index from a source index.</p>
+          <p>Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from>`_
+
+        :param source: The source index or data stream name
+        :param dest: The destination index or data stream name
+        :param create_from:
+        """
+        if source in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'source'")
+        if dest in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'dest'")
+        if create_from is not None and body is not None:
+            raise ValueError("Cannot set both 'create_from' and 'body'")
+        __path_parts: t.Dict[str, str] = {
+            "source": _quote(source),
+            "dest": _quote(dest),
+        }
+        __path = f'/_create_from/{__path_parts["source"]}/{__path_parts["dest"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __body = create_from if create_from is not None else body
+        if not __body:
+            __body = None
+        __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.create_from",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters()
     def data_streams_stats(
         self,
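
A usage sketch for the new experimental create_from method added above, assuming a source index "old-index" and a destination "new-index"; whether mapping and settings overrides are supplied through the optional create_from body is left to the linked API documentation:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Copy mappings and settings from "old-index" into a newly created "new-index".
    # Per-request overrides, if needed, would go in the optional create_from body.
    client.indices.create_from(source="old-index", dest="new-index")
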
@@ -731,11 +861,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data stream stats.
-          Retrieves statistics for one or more data streams.</p>
+          <p>Get data stream stats.</p>
+          <p>Get statistics for one or more data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1>`_
 
         :param name: Comma-separated list of data streams used to limit the request.
             Wildcard expressions (`*`) are supported. To target all data streams in a
@@ -804,7 +934,7 @@ class IndicesClient(NamespacedClient):
           You can then use the delete index API to delete the previous write index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete>`_
 
         :param index: Comma-separated list of indices to delete. You cannot specify index
             aliases. By default, this parameter does not support wildcards (`*`) or `_all`.
@@ -816,7 +946,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -878,7 +1008,7 @@ class IndicesClient(NamespacedClient):
           Removes a data stream or index from an alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-alias.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias>`_
 
         :param index: Comma-separated list of data streams or indices used to limit the
             request. Supports wildcards (`*`).
@@ -946,7 +1076,7 @@ class IndicesClient(NamespacedClient):
           Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-delete-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle>`_
 
         :param name: A comma-separated list of data streams of which the data stream
             lifecycle will be deleted; use `*` to get all data streams
@@ -1010,7 +1140,7 @@ class IndicesClient(NamespacedClient):
           Deletes one or more data streams and their backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream>`_
 
         :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions
             are supported.
@@ -1048,6 +1178,71 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    def delete_data_stream_options(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Delete data stream options.
+          Removes the data stream options from a data stream.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream-options>`_
+
+        :param name: A comma-separated list of data streams of which the data stream
+            options will be deleted; use `*` to get all data streams
+        :param expand_wildcards: Whether wildcard expressions should get expanded to
+            open or closed indices (default: open)
+        :param master_timeout: Specify timeout for connection to master
+        :param timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_options'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "DELETE",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.delete_data_stream_options",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
     def delete_index_template(
         self,
         *,
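
A sketch of the new delete_data_stream_options call added above, assuming a data stream named "my-data-stream":

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Remove any configured options from the data stream, waiting up to 30s for the master node.
    client.indices.delete_data_stream_options(name="my-data-stream", master_timeout="30s")
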
@@ -1068,7 +1263,7 @@ class IndicesClient(NamespacedClient):
           existing templates.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (*) expressions are supported.
@@ -1120,10 +1315,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a legacy index template.</p>
+          <p>Delete a legacy index template.
+          IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-delete-template-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template>`_
 
         :param name: The name of the legacy index template to delete. Wildcard (`*`)
             expressions are supported.
@@ -1193,9 +1389,10 @@ class IndicesClient(NamespacedClient):
           <p>NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index <code>store_size</code> value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
           Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.
           The stored size of the <code>_id</code> field is likely underestimated while the <code>_source</code> field is overestimated.</p>
+          <p>For usage examples, refer to the external documentation or to <a href="https://www.elastic.co/docs/reference/elasticsearch/rest-apis/index-disk-usage">Analyze the index disk usage example</a>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-disk-usage.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. It’s recommended to execute this API with a single
@@ -1278,7 +1475,7 @@ class IndicesClient(NamespacedClient):
           The source index must be read only (<code>index.blocks.write: true</code>).</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-downsample-data-stream.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample>`_
 
         :param index: Name of the time series index to downsample.
         :param target_index: Name of the index to create.
@@ -1350,7 +1547,7 @@ class IndicesClient(NamespacedClient):
           Check if one or more indices, index aliases, or data streams exist.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-exists.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases. Supports
             wildcards (`*`).
@@ -1360,7 +1557,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param flat_settings: If `true`, returns settings in flat format.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
@@ -1422,17 +1619,17 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> HeadApiResponse:
         """
         .. raw:: html
 
-          <p>Check aliases.
-          Checks if one or more data stream or index aliases exist.</p>
+          <p>Check aliases.</p>
+          <p>Check if one or more data stream or index aliases exist.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias>`_
 
         :param name: Comma-separated list of aliases to check. Supports wildcards (`*`).
         :param index: Comma-separated list of data streams or indices used to limit the
@@ -1444,11 +1641,12 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, requests that include a missing data stream
             or index in the target indices or data streams return an error.
-        :param local: If `true`, the request retrieves information from the local node
-            only.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -1474,8 +1672,8 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
-        if local is not None:
-            __query["local"] = local
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -1495,21 +1693,27 @@ class IndicesClient(NamespacedClient):
         name: str,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        flat_settings: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
+        local: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> HeadApiResponse:
         """
         .. raw:: html
 
-          <p>Check index templates.
-          Check whether index templates exist.</p>
+          <p>Check index templates.</p>
+          <p>Check whether index templates exist.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/index-templates.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (*) expressions are supported.
+        :param flat_settings: If true, returns settings in flat format.
+        :param local: If true, the request retrieves information from the local node
+            only. Defaults to false, which means information is retrieved from the master
+            node.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
@@ -1523,8 +1727,12 @@ class IndicesClient(NamespacedClient):
             __query["error_trace"] = error_trace
         if filter_path is not None:
             __query["filter_path"] = filter_path
+        if flat_settings is not None:
+            __query["flat_settings"] = flat_settings
         if human is not None:
             __query["human"] = human
+        if local is not None:
+            __query["local"] = local
         if master_timeout is not None:
             __query["master_timeout"] = master_timeout
         if pretty is not None:
@@ -1561,7 +1769,7 @@ class IndicesClient(NamespacedClient):
           <p>IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-template-exists-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template>`_
 
         :param name: A comma-separated list of index template names used to limit the
             request. Wildcard (`*`) expressions are supported.
@@ -1619,7 +1827,7 @@ class IndicesClient(NamespacedClient):
           Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-explain-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle>`_
 
         :param index: The name of the index to explain
         :param include_defaults: indicates if the API should return the default values
@@ -1673,12 +1881,7 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        wait_for_active_shards: t.Optional[
-            t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
-        ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -1691,7 +1894,7 @@ class IndicesClient(NamespacedClient):
           A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/field-usage-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats>`_
 
         :param index: Comma-separated list or wildcard expression of index names used
             to limit the request.
@@ -1708,14 +1911,6 @@ class IndicesClient(NamespacedClient):
             in the statistics.
         :param ignore_unavailable: If `true`, missing or closed indices are not included
             in the response.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
-        :param wait_for_active_shards: The number of shard copies that must be active
-            before proceeding with the operation. Set to all or any positive integer
-            up to the total number of shards in the index (`number_of_replicas+1`).
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -1736,14 +1931,8 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
-        if wait_for_active_shards is not None:
-            __query["wait_for_active_shards"] = wait_for_active_shards
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -1790,7 +1979,7 @@ class IndicesClient(NamespacedClient):
           If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-flush.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases to flush.
             Supports wildcards (`*`). To flush all data streams and indices, omit this
@@ -1801,7 +1990,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param force: If `true`, the request forces a flush even if there are no changes
             to commit to the index.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
@@ -1915,7 +2104,7 @@ class IndicesClient(NamespacedClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-forcemerge.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge>`_
 
         :param index: A comma-separated list of index names; use `_all` or empty string
             to perform the operation on all indices
@@ -2013,7 +2202,7 @@ class IndicesClient(NamespacedClient):
           stream’s backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get>`_
 
         :param index: Comma-separated list of data streams, indices, and index aliases
             used to limit the request. Wildcard expressions (*) are supported.
@@ -2096,7 +2285,7 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2106,7 +2295,7 @@ class IndicesClient(NamespacedClient):
           Retrieves information for one or more data stream or index aliases.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-alias.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias>`_
 
         :param index: Comma-separated list of data streams or indices used to limit the
             request. Supports wildcards (`*`). To target all data streams and indices,
@@ -2119,11 +2308,12 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
-        :param local: If `true`, the request retrieves information from the local node
-            only.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH:
@@ -2151,8 +2341,8 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
-        if local is not None:
-            __query["local"] = local
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -2188,18 +2378,17 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data stream lifecycles.
-          Retrieves the data stream lifecycle configuration of one or more data streams.</p>
+          <p>Get data stream lifecycles.</p>
+          <p>Get the data stream lifecycle configuration of one or more data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-get-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle>`_
 
         :param name: Comma-separated list of data streams to limit the request. Supports
             wildcards (`*`). To target all data streams, omit this parameter or use `*`
             or `_all`.
         :param expand_wildcards: Type of data stream that wildcard patterns can match.
-            Supports comma-separated values, such as `open,hidden`. Valid values are:
-            `all`, `open`, `closed`, `hidden`, `none`.
+            Supports comma-separated values, such as `open,hidden`.
         :param include_defaults: If `true`, return all default settings in the response.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -2250,7 +2439,7 @@ class IndicesClient(NamespacedClient):
           Get statistics about the data streams that are managed by a data stream lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-get-lifecycle-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_lifecycle/stats"
@@ -2297,11 +2486,11 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data streams.
-          Retrieves information about one or more data streams.</p>
+          <p>Get data streams.</p>
+          <p>Get information about one or more data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream>`_
 
         :param name: Comma-separated list of data stream names used to limit the request.
             Wildcard (`*`) expressions are supported. If omitted, all data streams are
@@ -2351,6 +2540,172 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    def get_data_stream_mappings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get data stream mappings.</p>
+          <p>Get mapping information for one or more data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-mappings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+            Supports wildcards (`*`).
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_mappings'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_data_stream_mappings",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    def get_data_stream_options(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get data stream options.</p>
+          <p>Get the data stream options configuration of one or more data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-options>`_
+
+        :param name: Comma-separated list of data streams to limit the request. Supports
+            wildcards (`*`). To target all data streams, omit this parameter or use `*`
+            or `_all`.
+        :param expand_wildcards: Type of data stream that wildcard patterns can match.
+            Supports comma-separated values, such as `open,hidden`.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_options'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_data_stream_options",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    def get_data_stream_settings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get data stream settings.</p>
+          <p>Get setting information for one or more data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+            Supports wildcards (`*`).
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_settings'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_data_stream_settings",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
     def get_field_mapping(
         self,
         *,
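
A combined sketch of the three new data stream getters added above (mappings, options, and settings), assuming a data stream named "my-data-stream"; wildcards such as "*" are also accepted:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    mappings = client.indices.get_data_stream_mappings(name="my-data-stream")
    options = client.indices.get_data_stream_options(name="my-data-stream")
    settings = client.indices.get_data_stream_settings(name="my-data-stream")

    # Each call returns a JSON-like response object keyed by data stream name.
    print(mappings, options, settings)
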
@@ -2370,7 +2725,6 @@ class IndicesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         include_defaults: t.Optional[bool] = None,
-        local: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -2382,7 +2736,7 @@ class IndicesClient(NamespacedClient):
           <p>This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-field-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping>`_
 
         :param fields: Comma-separated list or wildcard expression of fields used to
             limit returned information. Supports wildcards (`*`).
@@ -2395,12 +2749,10 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param include_defaults: If `true`, return all default settings in the response.
-        :param local: If `true`, the request retrieves information from the local node
-            only.
         """
         if fields in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'fields'")
@@ -2428,8 +2780,6 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if include_defaults is not None:
             __query["include_defaults"] = include_defaults
-        if local is not None:
-            __query["local"] = local
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -2463,7 +2813,7 @@ class IndicesClient(NamespacedClient):
           Get information about one or more index templates.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (*) expressions are supported.
@@ -2540,7 +2890,7 @@ class IndicesClient(NamespacedClient):
           For data streams, the API retrieves mappings for the stream’s backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -2551,7 +2901,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param local: If `true`, the request retrieves information from the local node
@@ -2597,6 +2947,51 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def get_migrate_reindex_status(
+        self,
+        *,
+        index: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get the migration reindexing status.</p>
+          <p>Get the status of a migration reindex attempt for a data stream or index.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration>`_
+
+        :param index: The index or data stream name.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'index'")
+        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
+        __path = f'/_migration/reindex/{__path_parts["index"]}/_status'
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.get_migrate_reindex_status",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
     def get_settings(
         self,
         *,
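
A sketch of the new experimental get_migrate_reindex_status method added above, assuming a migration reindex was previously started for "my-data-stream" (see the migrate_reindex sketch further below):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

    # Poll the status of the persistent migration reindex task for the data stream.
    status = client.indices.get_migrate_reindex_status(index="my-data-stream")
    print(status)
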
@@ -2629,7 +3024,7 @@ class IndicesClient(NamespacedClient):
           For data streams, it returns setting information for the stream's backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -2716,12 +3111,12 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get index templates.
+          <p>Get legacy index templates.
           Get information about one or more index templates.</p>
           <p>IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-get-template-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template>`_
 
         :param name: Comma-separated list of index template names used to limit the request.
             Wildcard (`*`) expressions are supported. To return all index templates,
@@ -2765,6 +3160,62 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters(
+        body_name="reindex",
+    )
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def migrate_reindex(
+        self,
+        *,
+        reindex: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Reindex legacy backing indices.</p>
+          <p>Reindex all legacy backing indices for a data stream.
+          This operation occurs in a persistent task.
+          The persistent task ID is returned immediately and the reindexing work is completed in that task.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex>`_
+
+        :param reindex:
+        """
+        if reindex is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'reindex' and 'body', one of them should be set."
+            )
+        elif reindex is not None and body is not None:
+            raise ValueError("Cannot set both 'reindex' and 'body'")
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_migration/reindex"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __body = reindex if reindex is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.migrate_reindex",
+            path_parts=__path_parts,
+        )
+
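Editor's note: a minimal usage sketch for the new `migrate_reindex` call above, assuming a synchronous client reachable at a hypothetical local URL and a legacy data stream named `my-logs`; the body layout (`mode`/`source`) is an assumption based on the migrate-reindex request description, not confirmed by this diff.

from elasticsearch import Elasticsearch

# Assumed local cluster URL and data stream name; adjust for your deployment.
es = Elasticsearch("http://localhost:9200")

# Start the persistent task that reindexes all legacy backing indices of the stream.
resp = es.indices.migrate_reindex(
    reindex={
        "mode": "upgrade",              # assumed body field
        "source": {"index": "my-logs"},  # assumed body field
    },
)
print(resp)  # the persistent task information is returned immediately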
     @_rewrite_parameters()
     def migrate_to_data_stream(
         self,
@@ -2793,7 +3244,7 @@ class IndicesClient(NamespacedClient):
           The write index for the alias becomes the write index for the stream.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream>`_
 
         :param name: Name of the index alias to convert to a data stream.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -2849,7 +3300,7 @@ class IndicesClient(NamespacedClient):
           Performs one or more data stream modification actions in a single atomic operation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream>`_
 
         :param actions: Actions to perform.
         """
@@ -2928,7 +3379,7 @@ class IndicesClient(NamespacedClient):
           <p>Because opening or closing an index allocates its shards, the <code>wait_for_active_shards</code> setting on index creation applies to the <code>_open</code> and <code>_close</code> index actions as well.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-open-close.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). By default, you must explicitly
@@ -2942,7 +3393,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -3014,7 +3465,7 @@ class IndicesClient(NamespacedClient):
           This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream>`_
 
         :param name: The name of the data stream
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -3080,7 +3531,7 @@ class IndicesClient(NamespacedClient):
           Adds a data stream or index to an alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias>`_
 
         :param index: Comma-separated list of data streams or indices to add. Supports
             wildcards (`*`). Wildcard patterns that match both data streams and indices
@@ -3155,7 +3606,7 @@ class IndicesClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("data_retention", "downsampling"),
+        body_fields=("data_retention", "downsampling", "enabled"),
     )
     def put_data_lifecycle(
         self,
@@ -3163,6 +3614,7 @@ class IndicesClient(NamespacedClient):
         name: t.Union[str, t.Sequence[str]],
         data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         downsampling: t.Optional[t.Mapping[str, t.Any]] = None,
+        enabled: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         expand_wildcards: t.Optional[
             t.Union[
@@ -3186,7 +3638,7 @@ class IndicesClient(NamespacedClient):
           Update the data stream lifecycle of the specified data streams.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/data-streams-put-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle>`_
 
         :param name: Comma-separated list of data streams used to limit the request.
             Supports wildcards (`*`). To target all data streams use `*` or `_all`.
@@ -3194,12 +3646,13 @@ class IndicesClient(NamespacedClient):
             be stored at least for this time frame. Any time after this duration the
             document could be deleted. When empty, every document in this data stream
             will be stored indefinitely.
-        :param downsampling: If defined, every backing index will execute the configured
-            downsampling configuration after the backing index is not the data stream
-            write index anymore.
+        :param downsampling: The downsampling configuration to execute for the managed
+            backing index after rollover.
+        :param enabled: If defined, this turns the data stream lifecycle on or off (`true`/`false`)
+            for this data stream. A data stream lifecycle that is disabled (`enabled: false`)
+            will have no effect on the data stream.
         :param expand_wildcards: Type of data stream that wildcard patterns can match.
-            Supports comma-separated values, such as `open,hidden`. Valid values are:
-            `all`, `hidden`, `open`, `closed`, `none`.
+            Supports comma-separated values, such as `open,hidden`.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
@@ -3231,11 +3684,9 @@ class IndicesClient(NamespacedClient):
                 __body["data_retention"] = data_retention
             if downsampling is not None:
                 __body["downsampling"] = downsampling
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+            if enabled is not None:
+                __body["enabled"] = enabled
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -3247,6 +3698,240 @@ class IndicesClient(NamespacedClient):
         )
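Editor's note: a short sketch of the new `enabled` body field, reusing the `es` client from the earlier migrate_reindex sketch and the illustrative data stream `my-logs`; the retention value is only an example.

# Set a 7-day retention and explicitly enable the lifecycle for the stream.
es.indices.put_data_lifecycle(name="my-logs", data_retention="7d", enabled=True)

# Disabling leaves the lifecycle configuration in place but makes it inert.
es.indices.put_data_lifecycle(name="my-logs", enabled=False)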
 
     @_rewrite_parameters(
+        body_name="mappings",
+    )
+    def put_data_stream_mappings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        mappings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        dry_run: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Update data stream mappings.</p>
+          <p>This API can be used to override mappings on specific data streams. These overrides will take precedence over what
+          is specified in the template that the data stream matches. The mapping change is only applied to new write indices
+          that are created during rollover after this API is called. No indices are changed by this API.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-mappings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+        :param mappings:
+        :param dry_run: If `true`, the request does not actually change the mappings
+            on any data streams. Instead, it simulates changing the mappings and reports
+            back to the user what would have happened had these mappings actually been
+            applied.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        if mappings is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'mappings' and 'body', one of them should be set."
+            )
+        elif mappings is not None and body is not None:
+            raise ValueError("Cannot set both 'mappings' and 'body'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_mappings'
+        __query: t.Dict[str, t.Any] = {}
+        if dry_run is not None:
+            __query["dry_run"] = dry_run
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __body = mappings if mappings is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.put_data_stream_mappings",
+            path_parts=__path_parts,
+        )
+
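Editor's note: a hedged sketch of the per-data-stream mapping override above, reusing the `es` client and the illustrative stream `my-logs`; the field name is made up, and `dry_run=True` previews the change without applying it.

# Override one field's mapping; per the docstring, the override only affects
# write indices created by future rollovers.
resp = es.indices.put_data_stream_mappings(
    name="my-logs",
    mappings={"properties": {"client_ip": {"type": "ip"}}},
    dry_run=True,  # simulate first, then repeat without dry_run to apply
)
print(resp)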
+    @_rewrite_parameters(
+        body_fields=("failure_store",),
+    )
+    def put_data_stream_options(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        failure_store: t.Optional[t.Mapping[str, t.Any]] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Update data stream options.
+          Update the data stream options of the specified data streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-options>`_
+
+        :param name: Comma-separated list of data streams used to limit the request.
+            Supports wildcards (`*`). To target all data streams use `*` or `_all`.
+        :param expand_wildcards: Type of data stream that wildcard patterns can match.
+            Supports comma-separated values, such as `open,hidden`.
+        :param failure_store: If defined, it will update the failure store configuration
+            of every data stream resolved by the name expression.
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_options'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if failure_store is not None:
+                __body["failure_store"] = failure_store
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.put_data_stream_options",
+            path_parts=__path_parts,
+        )
+
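Editor's note: a sketch for the new data stream options endpoint, reusing `es` and `my-logs`; the `failure_store` body shape (`{"enabled": ...}`) is an assumption about the failure store configuration, not shown in this diff.

# Update the failure store configuration for every data stream matched by the name.
es.indices.put_data_stream_options(
    name="my-logs",
    failure_store={"enabled": True},  # assumed body shape
)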
+    @_rewrite_parameters(
+        body_name="settings",
+    )
+    def put_data_stream_settings(
+        self,
+        *,
+        name: t.Union[str, t.Sequence[str]],
+        settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
+        dry_run: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Update data stream settings.</p>
+          <p>This API can be used to override settings on specific data streams. These overrides will take precedence over what
+          is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,
+          only certain settings are allowed. If possible, the setting change is applied to all
+          backing indices. Otherwise, it will be applied when the data stream is next rolled over.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings>`_
+
+        :param name: A comma-separated list of data streams or data stream patterns.
+        :param settings:
+        :param dry_run: If `true`, the request does not actually change the settings
+            on any data streams or indices. Instead, it simulates changing the settings
+            and reports back to the user what would have happened had these settings
+            actually been applied.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        if settings is None and body is None:
+            raise ValueError(
+                "Empty value passed for parameters 'settings' and 'body', one of them should be set."
+            )
+        elif settings is not None and body is not None:
+            raise ValueError("Cannot set both 'settings' and 'body'")
+        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
+        __path = f'/_data_stream/{__path_parts["name"]}/_settings'
+        __query: t.Dict[str, t.Any] = {}
+        if dry_run is not None:
+            __query["dry_run"] = dry_run
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __body = settings if settings is not None else body
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="indices.put_data_stream_settings",
+            path_parts=__path_parts,
+        )
+
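Editor's note: a sketch of the settings override above, reusing `es` and `my-logs`, and assuming `index.lifecycle.name` is one of the settings this endpoint allows (check the allowed-settings list; the policy name is hypothetical).

# Preview a settings override; drop dry_run to apply it for real.
resp = es.indices.put_data_stream_settings(
    name="my-logs",
    settings={"index.lifecycle.name": "my-policy"},  # assumed to be an allowed setting
    dry_run=True,
)
print(resp)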
+    @_rewrite_parameters(
         body_fields=(
             "allow_auto_create",
             "composed_of",
@@ -3310,7 +3995,7 @@ class IndicesClient(NamespacedClient):
           If an entry already exists with the same key, then it is overwritten by the new definition.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template>`_
 
         :param name: Index or template name
         :param allow_auto_create: This setting overrides the value of the `action.auto_create_index`
@@ -3473,27 +4158,20 @@ class IndicesClient(NamespacedClient):
 
           <p>Update field mappings.
           Add new fields to an existing data stream or index.
-          You can also use this API to change the search settings of existing fields and add new properties to existing object fields.
-          For data streams, these changes are applied to all backing indices by default.</p>
-          <p><strong>Add multi-fields to an existing field</strong></p>
-          <p>Multi-fields let you index the same field in different ways.
-          You can use this API to update the fields mapping parameter and enable multi-fields for an existing field.
-          WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.
-          You can populate the new multi-field with the update by query API.</p>
-          <p><strong>Change supported mapping parameters for an existing field</strong></p>
-          <p>The documentation for each mapping parameter indicates whether you can update it for an existing field using this API.
-          For example, you can use the update mapping API to update the <code>ignore_above</code> parameter.</p>
-          <p><strong>Change the mapping of an existing field</strong></p>
-          <p>Except for supported mapping parameters, you can't change the mapping or field type of an existing field.
-          Changing an existing field could invalidate data that's already indexed.</p>
-          <p>If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.
-          If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.</p>
-          <p><strong>Rename a field</strong></p>
-          <p>Renaming a field would invalidate data already indexed under the old field name.
-          Instead, add an alias field to create an alternate field name.</p>
+          You can use the update mapping API to:</p>
+          <ul>
+          <li>Add a new field to an existing index</li>
+          <li>Update mappings for multiple indices in a single request</li>
+          <li>Add new properties to an object field</li>
+          <li>Enable multi-fields for an existing field</li>
+          <li>Update supported mapping parameters</li>
+          <li>Change a field's mapping using reindexing</li>
+          <li>Rename a field using a field alias</li>
+          </ul>
+          <p>Learn how to use the update mapping API with practical examples in the <a href="https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples">Update mapping API examples</a> guide.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-put-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping>`_
 
         :param index: A comma-separated list of index names the mapping should be added
             to (supports wildcards); use `_all` or omit to add the mapping on all indices.
@@ -3509,7 +4187,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param field_names: Control whether field names are enabled for the index.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
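Editor's note: to ground the bullet list above, a minimal sketch that adds a new field and enables a multi-field on an existing field, reusing `es` and an illustrative index `my-index`; `message` is assumed to already be mapped as `text`, and its type must be repeated when adding the multi-field.

# Add a new keyword field and a keyword multi-field under an existing text field.
es.indices.put_mapping(
    index="my-index",
    properties={
        "user_id": {"type": "keyword"},
        "message": {
            "type": "text",
            "fields": {"raw": {"type": "keyword"}},  # multi-field added after the fact
        },
    },
)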
@@ -3617,6 +4295,7 @@ class IndicesClient(NamespacedClient):
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         preserve_existing: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        reopen: t.Optional[bool] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -3626,8 +4305,36 @@ class IndicesClient(NamespacedClient):
           Changes dynamic index settings in real time.
           For data streams, index setting changes are applied to all backing indices by default.</p>
           <p>To revert a setting to the default value, use a null value.
-          The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
+          The list of per-index settings that can be updated dynamically on live indices can be found in the index settings documentation.
           To preserve existing settings from being updated, set the <code>preserve_existing</code> parameter to <code>true</code>.</p>
+          <p>For performance optimization during bulk indexing, you can disable the refresh interval.
+          Refer to <a href="https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval">disable refresh interval</a> for an example.
+          There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:</p>
+          <pre><code>{
+            &quot;number_of_replicas&quot;: 1
+          }
+          </code></pre>
+          <p>Or you can use an <code>index</code> setting object:</p>
+          <pre><code>{
+            &quot;index&quot;: {
+              &quot;number_of_replicas&quot;: 1
+            }
+          }
+          </code></pre>
+          <p>Or you can use dot notation:</p>
+          <pre><code>{
+            &quot;index.number_of_replicas&quot;: 1
+          }
+          </code></pre>
+          <p>Or you can embed any of the aforementioned options in a <code>settings</code> object. For example:</p>
+          <pre><code>{
+            &quot;settings&quot;: {
+              &quot;index&quot;: {
+                &quot;number_of_replicas&quot;: 1
+              }
+            }
+          }
+          </code></pre>
           <p>NOTE: You can only define new analyzers on closed indices.
           To add an analyzer, you must close the index, define the analyzer, and reopen the index.
           You cannot close the write index of a data stream.
@@ -3635,10 +4342,11 @@ class IndicesClient(NamespacedClient):
           Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
           This affects searches and any new data added to the stream after the rollover.
           However, it does not affect the data stream's backing indices or their existing data.
-          To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.</p>
+          To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
+          Refer to <a href="https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices">updating analyzers on existing indices</a> for step-by-step examples.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings>`_
 
         :param settings:
         :param index: Comma-separated list of data streams, indices, and aliases used
@@ -3659,6 +4367,9 @@ class IndicesClient(NamespacedClient):
             no response is received before the timeout expires, the request fails and
             returns an error.
         :param preserve_existing: If `true`, existing index settings remain unchanged.
+        :param reopen: Whether to close and reopen the index to apply non-dynamic settings.
+            If set to `true`, the indices to which the settings are being applied will
+            be closed temporarily and then reopened in order to apply the changes.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         """
@@ -3696,6 +4407,8 @@ class IndicesClient(NamespacedClient):
             __query["preserve_existing"] = preserve_existing
         if pretty is not None:
             __query["pretty"] = pretty
+        if reopen is not None:
+            __query["reopen"] = reopen
         if timeout is not None:
             __query["timeout"] = timeout
         __body = settings if settings is not None else body
@@ -3742,7 +4455,7 @@ class IndicesClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create or update an index template.
+          <p>Create or update a legacy index template.
           Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
           Elasticsearch applies templates to new indices based on an index pattern that matches the index name.</p>
           <p>IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.</p>
@@ -3759,11 +4472,11 @@ class IndicesClient(NamespacedClient):
           NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-templates-v1.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template>`_
 
         :param name: The name of the template
         :param aliases: Aliases for the index.
-        :param cause:
+        :param cause: User-defined reason for creating or updating the index template.
         :param create: If true, this request cannot replace or update existing index
             templates.
         :param index_patterns: Array of wildcard expressions used to match the names
@@ -3831,10 +4544,20 @@ class IndicesClient(NamespacedClient):
         *,
         index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         active_only: t.Optional[bool] = None,
+        allow_no_indices: t.Optional[bool] = None,
         detailed: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -3861,14 +4584,23 @@ class IndicesClient(NamespacedClient):
           This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-recovery.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
             and indices, omit this parameter or use `*` or `_all`.
         :param active_only: If `true`, the response only includes ongoing shard recoveries.
+        :param allow_no_indices: If `false`, the request returns an error if any wildcard
+            expression, index alias, or `_all` value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices.
         :param detailed: If `true`, the response includes detailed information about
             shard recoveries.
+        :param expand_wildcards: Type of index that wildcard patterns can match. If the
+            request can target data streams, this argument determines whether wildcard
+            expressions match hidden data streams. Supports comma-separated values, such
+            as `open,hidden`.
+        :param ignore_unavailable: If `false`, the request returns an error if it targets
+            a missing or closed index.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH:
@@ -3880,14 +4612,20 @@ class IndicesClient(NamespacedClient):
         __query: t.Dict[str, t.Any] = {}
         if active_only is not None:
             __query["active_only"] = active_only
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
         if detailed is not None:
             __query["detailed"] = detailed
         if error_trace is not None:
             __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
         if filter_path is not None:
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
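Editor's note: a quick sketch of the recovery call with the newly added filters, reusing `es` and an illustrative index pattern `my-index-*`.

# Show only ongoing shard recoveries, expanding hidden indices as well.
resp = es.indices.recovery(
    index="my-index-*",
    active_only=True,
    expand_wildcards="open,hidden",
    allow_no_indices=True,
)
print(resp)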
@@ -3928,6 +4666,7 @@ class IndicesClient(NamespacedClient):
           For data streams, the API runs the refresh operation on the stream’s backing indices.</p>
           <p>By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.
           You can change this default interval with the <code>index.refresh_interval</code> setting.</p>
+          <p>In Elastic Cloud Serverless, the default refresh interval is 5 seconds across all indices.</p>
           <p>Refresh requests are synchronous and do not return a response until the refresh operation completes.</p>
           <p>Refreshes are resource-intensive.
           To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.</p>
@@ -3935,7 +4674,7 @@ class IndicesClient(NamespacedClient):
           This option ensures the indexing operation waits for a periodic refresh before running the search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-refresh.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -3946,7 +4685,7 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
         """
@@ -4001,6 +4740,7 @@ class IndicesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        resource: t.Optional[str] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4018,7 +4758,7 @@ class IndicesClient(NamespacedClient):
           This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-reload-analyzers.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers>`_
 
         :param index: A comma-separated list of index names to reload analyzers for
         :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves
@@ -4028,6 +4768,7 @@ class IndicesClient(NamespacedClient):
             that are open, closed or both.
         :param ignore_unavailable: Whether specified concrete indices should be ignored
             when unavailable (missing or closed)
+        :param resource: The changed resource to reload analyzers from, if applicable.
         """
         if index in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'index'")
@@ -4048,6 +4789,8 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
+        if resource is not None:
+            __query["resource"] = resource
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -4059,10 +4802,109 @@ class IndicesClient(NamespacedClient):
         )
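Editor's note: a sketch for reloading search analyzers after a synonyms file changes, reusing `es` and assuming `my-index` uses an updateable synonym filter.

# Reload search-time analyzers on every shard of the index.
resp = es.indices.reload_search_analyzers(index="my-index")
print(resp)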
 
     @_rewrite_parameters()
+    def remove_block(
+        self,
+        *,
+        index: str,
+        block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]],
+        allow_no_indices: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        ignore_unavailable: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Remove an index block.</p>
+          <p>Remove an index block from an index.
+          Index blocks limit the operations allowed on an index by blocking specific operation types.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block>`_
+
+        :param index: A comma-separated list or wildcard expression of index names used
+            to limit the request. By default, you must explicitly name the indices you
+            are removing blocks from. To allow the removal of blocks from indices with
+            `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name`
+            setting to `false`. You can update this setting in the `elasticsearch.yml`
+            file or by using the cluster update settings API.
+        :param block: The block type to remove from the index.
+        :param allow_no_indices: If `false`, the request returns an error if any wildcard
+            expression, index alias, or `_all` value targets only missing or closed indices.
+            This behavior applies even if the request targets other open indices. For
+            example, a request targeting `foo*,bar*` returns an error if an index starts
+            with `foo` but no index starts with `bar`.
+        :param expand_wildcards: The type of index that wildcard patterns can match.
+            If the request can target data streams, this argument determines whether
+            wildcard expressions match hidden data streams. It supports comma-separated
+            values, such as `open,hidden`.
+        :param ignore_unavailable: If `false`, the request returns an error if it targets
+            a missing or closed index.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. It can also be set to `-1` to indicate that the request should
+            never timeout.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. It can
+            also be set to `-1` to indicate that the request should never timeout.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'index'")
+        if block in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'block'")
+        __path_parts: t.Dict[str, str] = {
+            "index": _quote(index),
+            "block": _quote(block),
+        }
+        __path = f'/{__path_parts["index"]}/_block/{__path_parts["block"]}'
+        __query: t.Dict[str, t.Any] = {}
+        if allow_no_indices is not None:
+            __query["allow_no_indices"] = allow_no_indices
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if expand_wildcards is not None:
+            __query["expand_wildcards"] = expand_wildcards
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if ignore_unavailable is not None:
+            __query["ignore_unavailable"] = ignore_unavailable
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "DELETE",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="indices.remove_block",
+            path_parts=__path_parts,
+        )
+
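Editor's note: a sketch for the new `remove_block` call, reusing `es` and assuming `my-index` currently carries a write block.

# Lift the write block that was previously applied with the add-block API.
es.indices.remove_block(index="my-index", block="write")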
+    @_rewrite_parameters()
     def resolve_cluster(
         self,
         *,
-        name: t.Union[str, t.Sequence[str]],
+        name: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         allow_no_indices: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         expand_wildcards: t.Optional[
@@ -4078,19 +4920,20 @@ class IndicesClient(NamespacedClient):
         ignore_throttled: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Resolve the cluster.
-          Resolve the specified index expressions to return information about each cluster, including the local cluster, if included.
-          Multiple patterns and remote clusters are supported.</p>
+          <p>Resolve the cluster.</p>
+          <p>Resolve the specified index expressions to return information about each cluster, including the local &quot;querying&quot; cluster, if included.
+          If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.</p>
           <p>This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.</p>
           <p>You use the same index expression with this endpoint as you would for cross-cluster search.
           Index and cluster exclusions are also supported with this endpoint.</p>
           <p>For each cluster in the index expression, information is returned about:</p>
           <ul>
-          <li>Whether the querying (&quot;local&quot;) cluster is currently connected to each remote cluster in the index expression scope.</li>
+          <li>Whether the querying (&quot;local&quot;) cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the <code>remote/info</code> endpoint.</li>
           <li>Whether each remote cluster is configured with <code>skip_unavailable</code> as <code>true</code> or <code>false</code>.</li>
           <li>Whether there are any indices, aliases, or data streams on that cluster that match the index expression.</li>
           <li>Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).</li>
@@ -4098,7 +4941,13 @@ class IndicesClient(NamespacedClient):
           </ul>
           <p>For example, <code>GET /_resolve/cluster/my-index-*,cluster*:my-index-*</code> returns information about the local cluster and all remotely configured clusters that start with the alias <code>cluster*</code>.
           Each cluster returns information about whether it has any indices, aliases or data streams that match <code>my-index-*</code>.</p>
-          <p><strong>Advantages of using this endpoint before a cross-cluster search</strong></p>
+          <h2>Note on backwards compatibility</h2>
+          <p>The ability to query without an index expression was added in version 8.18, so when
+          querying remote clusters older than that, the local cluster will send the index
+          expression <code>dummy*</code> to those remote clusters. Thus, if any errors occur, you may see a reference
+          to that index expression even though you didn't request it. If it causes a problem, you can
+          instead include an index expression like <code>*:*</code> to bypass the issue.</p>
+          <h2>Advantages of using this endpoint before a cross-cluster search</h2>
           <p>You may want to exclude a cluster or index from a search when:</p>
           <ul>
           <li>A remote cluster is not currently connected and is configured with <code>skip_unavailable=false</code>. Running a cross-cluster search under those conditions will cause the entire search to fail.</li>
@@ -4106,31 +4955,59 @@ class IndicesClient(NamespacedClient):
           <li>The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the &quot;error&quot; field in the <code>_resolve/cluster</code> response will be present. (This is also where security/permission errors will be shown.)</li>
           <li>A remote cluster is an older version that does not support the feature you want to use in your search.</li>
           </ul>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-cluster-api.html>`_
-
-        :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases,
-            and data streams to resolve. Resources on remote clusters can be specified
-            using the `<cluster>`:`<name>` syntax.
+          <h2>Test availability of remote clusters</h2>
+          <p>The <code>remote/info</code> endpoint is commonly used to test whether the &quot;local&quot; cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+          The remote cluster may be available, while the local cluster is not currently connected to it.</p>
+          <p>You can use the <code>_resolve/cluster</code> API to attempt to reconnect to remote clusters.
+          For example with <code>GET _resolve/cluster</code> or <code>GET _resolve/cluster/*:*</code>.
+          The <code>connected</code> field in the response will indicate whether it was successful.
+          If a connection was (re-)established, this will also cause the <code>remote/info</code> endpoint to now indicate a connected status.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster>`_
+
+        :param name: A comma-separated list of names or index patterns for the indices,
+            aliases, and data streams to resolve. Resources on remote clusters can be
+            specified using the `<cluster>`:`<name>` syntax. Index and cluster exclusions
+            (e.g., `-cluster1:*`) are also supported. If no index expression is specified,
+            information about all remote clusters configured on the local cluster is
+            returned without doing any index matching.
         :param allow_no_indices: If false, the request returns an error if any wildcard
-            expression, index alias, or _all value targets only missing or closed indices.
+            expression, index alias, or `_all` value targets only missing or closed indices.
             This behavior applies even if the request targets other open indices. For
-            example, a request targeting foo*,bar* returns an error if an index starts
-            with foo but no index starts with bar.
+            example, a request targeting `foo*,bar*` returns an error if an index starts
+            with `foo` but no index starts with `bar`. NOTE: This option is only supported
+            when specifying an index expression. You will get an error if you specify
+            index options to the `_resolve/cluster` API endpoint that takes no index
+            expression.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-        :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored
-            when frozen. Defaults to false.
+            as `open,hidden`. NOTE: This option is only supported when specifying an
+            index expression. You will get an error if you specify index options to the
+            `_resolve/cluster` API endpoint that takes no index expression.
+        :param ignore_throttled: If true, concrete, expanded, or aliased indices are
+            ignored when frozen. NOTE: This option is only supported when specifying
+            an index expression. You will get an error if you specify index options to
+            the `_resolve/cluster` API endpoint that takes no index expression.
         :param ignore_unavailable: If false, the request returns an error if it targets
-            a missing or closed index. Defaults to false.
+            a missing or closed index. NOTE: This option is only supported when specifying
+            an index expression. You will get an error if you specify index options to
+            the `_resolve/cluster` API endpoint that takes no index expression.
+        :param timeout: The maximum time to wait for remote clusters to respond. If a
+            remote cluster does not respond within this timeout period, the API response
+            will show the cluster as not connected and include an error message that
+            the request timed out. The default timeout is unset and the query can take
+            as long as the networking layer is configured to wait for remote clusters
+            that are not responding (typically 30 seconds).
         """
-        if name in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'name'")
-        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
-        __path = f'/_resolve/cluster/{__path_parts["name"]}'
+        __path_parts: t.Dict[str, str]
+        if name not in SKIP_IN_PATH:
+            __path_parts = {"name": _quote(name)}
+            __path = f'/_resolve/cluster/{__path_parts["name"]}'
+        else:
+            __path_parts = {}
+            __path = "/_resolve/cluster"
         __query: t.Dict[str, t.Any] = {}
         if allow_no_indices is not None:
             __query["allow_no_indices"] = allow_no_indices
@@ -4148,6 +5025,8 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -4176,7 +5055,18 @@ class IndicesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
+        mode: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str, t.Literal["logsdb", "lookup", "standard", "time_series"]
+                    ]
+                ],
+                t.Union[str, t.Literal["logsdb", "lookup", "standard", "time_series"]],
+            ]
+        ] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4186,7 +5076,7 @@ class IndicesClient(NamespacedClient):
           Multiple patterns and remote clusters are supported.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-resolve-index-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index>`_
 
         :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases,
             and data streams to resolve. Resources on remote clusters can be specified
@@ -4199,9 +5089,15 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
+        :param mode: Filter indices by index mode, such as `standard`, `lookup`, or
+            `time_series`. Accepts a comma-separated list of index modes. An empty
+            value means no filter is applied.
+        :param project_routing: Specifies a subset of projects to target using project
+            metadata tags in a subset of Lucene query syntax. Allowed Lucene queries:
+            the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless
+            only.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -4220,8 +5116,12 @@ class IndicesClient(NamespacedClient):
             __query["human"] = human
         if ignore_unavailable is not None:
             __query["ignore_unavailable"] = ignore_unavailable
+        if mode is not None:
+            __query["mode"] = mode
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -4246,6 +5146,7 @@ class IndicesClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        lazy: t.Optional[bool] = None,
         mappings: t.Optional[t.Mapping[str, t.Any]] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
@@ -4260,7 +5161,7 @@ class IndicesClient(NamespacedClient):
         .. raw:: html
 
           <p>Roll over to a new index.
-          TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.</p>
+          TIP: We recommend using the index lifecycle rollover action to automate rollovers. However, Serverless does not support Index Lifecycle Management (ILM), so don't use this approach in the Serverless context.</p>
           <p>The rollover API creates a new index for a data stream or index alias.
           The API behavior depends on the rollover target.</p>
           <p><strong>Roll over a data stream</strong></p>
@@ -4287,7 +5188,7 @@ class IndicesClient(NamespacedClient):
           If you roll over the alias on May 7, 2099, the new index's name is <code>my-index-2099.05.07-000002</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-rollover-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover>`_
 
         :param alias: Name of the data stream or index alias to roll over.
         :param new_index: Name of the index to create. Supports date math. Data streams
@@ -4302,6 +5203,9 @@ class IndicesClient(NamespacedClient):
             conditions are satisfied.
         :param dry_run: If `true`, checks whether the current index satisfies the specified
             conditions but does not perform a rollover.
+        :param lazy: If set to true, the rollover action will only mark a data stream
+            to signal that it needs to be rolled over at the next write. Only allowed
+            on data streams.
         :param mappings: Mapping for fields in the index. If specified, this mapping
             can include field names, field data types, and mapping parameters.
         :param master_timeout: Period to wait for a connection to the master node. If
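A minimal usage sketch for the new `lazy` rollover flag, assuming a 9.x client and a data stream named `my-data-stream`; the host and API key are placeholders.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection details

# Mark the data stream for rollover at the next write instead of rolling it over now.
client.indices.rollover(alias="my-data-stream", lazy=True)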
@@ -4336,6 +5240,8 @@ class IndicesClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if lazy is not None:
+            __query["lazy"] = lazy
         if master_timeout is not None:
             __query["master_timeout"] = master_timeout
         if pretty is not None:
@@ -4387,7 +5293,6 @@ class IndicesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_unavailable: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        verbose: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4397,7 +5302,7 @@ class IndicesClient(NamespacedClient):
           For data streams, the API returns information about the stream's backing indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-segments.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases used
             to limit the request. Supports wildcards (`*`). To target all data streams
@@ -4408,10 +5313,9 @@ class IndicesClient(NamespacedClient):
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
             a missing or closed index.
-        :param verbose: If `true`, the request returns a verbose response.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH:
@@ -4435,8 +5339,6 @@ class IndicesClient(NamespacedClient):
             __query["ignore_unavailable"] = ignore_unavailable
         if pretty is not None:
             __query["pretty"] = pretty
-        if verbose is not None:
-            __query["verbose"] = verbose
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -4489,7 +5391,7 @@ class IndicesClient(NamespacedClient):
           <p>By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shards-stores.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores>`_
 
         :param index: List of data streams, indices, and aliases used to limit the request.
         :param allow_no_indices: If false, the request returns an error if any wildcard
@@ -4591,7 +5493,7 @@ class IndicesClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-shrink-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink>`_
 
         :param index: Name of the source index to shrink.
         :param target: Name of the target index to create.
@@ -4636,11 +5538,7 @@ class IndicesClient(NamespacedClient):
                 __body["aliases"] = aliases
             if settings is not None:
                 __body["settings"] = settings
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -4651,15 +5549,21 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
-    @_rewrite_parameters()
+    @_rewrite_parameters(
+        body_name="index_template",
+    )
     def simulate_index_template(
         self,
         *,
         name: str,
+        cause: t.Optional[str] = None,
+        create: t.Optional[bool] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         include_defaults: t.Optional[bool] = None,
+        index_template: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Mapping[str, t.Any]] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
@@ -4670,20 +5574,31 @@ class IndicesClient(NamespacedClient):
           Get the index configuration that would be applied to the specified index from an existing index template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template>`_
 
         :param name: Name of the index to simulate
+        :param cause: User-defined reason for dry-run creating the new template, for
+            simulation purposes.
+        :param create: Whether the index template optionally defined in the body should
+            only be dry-run added if it is new, or whether it can also replace an existing
+            one.
         :param include_defaults: If true, returns all relevant default configurations
             for the index template.
+        :param index_template: The index template definition to simulate applying
+            to the specified index.
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
+        if index_template is not None and body is not None:
+            raise ValueError("Cannot set both 'index_template' and 'body'")
         __path_parts: t.Dict[str, str] = {"name": _quote(name)}
         __path = f'/_index_template/_simulate_index/{__path_parts["name"]}'
         __query: t.Dict[str, t.Any] = {}
+        if cause is not None:
+            __query["cause"] = cause
+        if create is not None:
+            __query["create"] = create
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -4696,12 +5611,18 @@ class IndicesClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        __body = index_template if index_template is not None else body
+        if not __body:
+            __body = None
         __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
             params=__query,
             headers=__headers,
+            body=__body,
             endpoint_id="indices.simulate_index_template",
             path_parts=__path_parts,
         )
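A minimal sketch of the reworked `simulate_index_template` call; the template body and names below are illustrative assumptions, not values taken from the diff.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection details

# Dry-run an index template against an index name without storing the template.
resp = client.indices.simulate_index_template(
    name="my-index-000001",
    create=True,
    cause="trying out a template tweak",  # user-defined reason, per the new query parameter
    index_template={
        "index_patterns": ["my-index-*"],
        "template": {"settings": {"number_of_shards": 1}},
    },
)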
@@ -4726,6 +5647,7 @@ class IndicesClient(NamespacedClient):
         *,
         name: t.Optional[str] = None,
         allow_auto_create: t.Optional[bool] = None,
+        cause: t.Optional[str] = None,
         composed_of: t.Optional[t.Sequence[str]] = None,
         create: t.Optional[bool] = None,
         data_stream: t.Optional[t.Mapping[str, t.Any]] = None,
@@ -4751,7 +5673,7 @@ class IndicesClient(NamespacedClient):
           Get the index configuration that would be applied by a particular index template.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-simulate-template.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template>`_
 
         :param name: Name of the index template to simulate. To test a template configuration
             before you add it to the cluster, omit this parameter and specify the template
@@ -4762,6 +5684,8 @@ class IndicesClient(NamespacedClient):
             via `actions.auto_create_index`. If set to `false`, then indices or data
             streams matching the template must always be explicitly created, and may
             never be automatically created.
+        :param cause: User-defined reason for dry-run creating the new template, for
+            simulation purposes.
         :param composed_of: An ordered list of component template names. Component templates
             are merged in the order specified, meaning that the last component template
             specified has the highest precedence.
@@ -4806,6 +5730,8 @@ class IndicesClient(NamespacedClient):
             __path = "/_index_template/_simulate"
         __query: t.Dict[str, t.Any] = {}
         __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if cause is not None:
+            __query["cause"] = cause
         if create is not None:
             __query["create"] = create
         if error_trace is not None:
@@ -4919,7 +5845,7 @@ class IndicesClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-split-index.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split>`_
 
         :param index: Name of the source index to split.
         :param target: Name of the target index to create.
@@ -4964,11 +5890,7 @@ class IndicesClient(NamespacedClient):
                 __body["aliases"] = aliases
             if settings is not None:
                 __body["settings"] = settings
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -5021,7 +5943,7 @@ class IndicesClient(NamespacedClient):
           Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats>`_
 
         :param index: A comma-separated list of index names; use `_all` or empty string
             to perform the operation on all indices
@@ -5045,8 +5967,8 @@ class IndicesClient(NamespacedClient):
             are requested).
         :param include_unloaded_segments: If true, the response includes information
             from segments that are not loaded into memory.
-        :param level: Indicates whether statistics are aggregated at the cluster, index,
-            or shard level.
+        :param level: Indicates whether statistics are aggregated at the cluster, indices,
+            or shards level.
         """
         __path_parts: t.Dict[str, str]
         if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH:
@@ -5098,92 +6020,6 @@ class IndicesClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
-    @_rewrite_parameters()
-    def unfreeze(
-        self,
-        *,
-        index: str,
-        allow_no_indices: t.Optional[bool] = None,
-        error_trace: t.Optional[bool] = None,
-        expand_wildcards: t.Optional[
-            t.Union[
-                t.Sequence[
-                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
-                ],
-                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
-            ]
-        ] = None,
-        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
-        human: t.Optional[bool] = None,
-        ignore_unavailable: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        wait_for_active_shards: t.Optional[str] = None,
-    ) -> ObjectApiResponse[t.Any]:
-        """
-        .. raw:: html
-
-          <p>Unfreeze an index.
-          When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/unfreeze-index-api.html>`_
-
-        :param index: Identifier for the index.
-        :param allow_no_indices: If `false`, the request returns an error if any wildcard
-            expression, index alias, or `_all` value targets only missing or closed indices.
-            This behavior applies even if the request targets other open indices.
-        :param expand_wildcards: Type of index that wildcard patterns can match. If the
-            request can target data streams, this argument determines whether wildcard
-            expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
-        :param ignore_unavailable: If `false`, the request returns an error if it targets
-            a missing or closed index.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
-        :param wait_for_active_shards: The number of shard copies that must be active
-            before proceeding with the operation. Set to `all` or any positive integer
-            up to the total number of shards in the index (`number_of_replicas+1`).
-        """
-        if index in SKIP_IN_PATH:
-            raise ValueError("Empty value passed for parameter 'index'")
-        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
-        __path = f'/{__path_parts["index"]}/_unfreeze'
-        __query: t.Dict[str, t.Any] = {}
-        if allow_no_indices is not None:
-            __query["allow_no_indices"] = allow_no_indices
-        if error_trace is not None:
-            __query["error_trace"] = error_trace
-        if expand_wildcards is not None:
-            __query["expand_wildcards"] = expand_wildcards
-        if filter_path is not None:
-            __query["filter_path"] = filter_path
-        if human is not None:
-            __query["human"] = human
-        if ignore_unavailable is not None:
-            __query["ignore_unavailable"] = ignore_unavailable
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
-        if pretty is not None:
-            __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
-        if wait_for_active_shards is not None:
-            __query["wait_for_active_shards"] = wait_for_active_shards
-        __headers = {"accept": "application/json"}
-        return self.perform_request(  # type: ignore[return-value]
-            "POST",
-            __path,
-            params=__query,
-            headers=__headers,
-            endpoint_id="indices.unfreeze",
-            path_parts=__path_parts,
-        )
-
     @_rewrite_parameters(
         body_fields=("actions",),
     )
@@ -5206,7 +6042,7 @@ class IndicesClient(NamespacedClient):
           Adds a data stream or index to an alias.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/indices-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases>`_
 
         :param actions: Actions to perform.
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -5285,7 +6121,7 @@ class IndicesClient(NamespacedClient):
           Validates a query without running it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-validate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query>`_
 
         :param index: Comma-separated list of data streams, indices, and aliases to search.
             Supports wildcards (`*`). To search all data streams or indices, omit this
@@ -5298,15 +6134,15 @@ class IndicesClient(NamespacedClient):
         :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed.
         :param analyzer: Analyzer to use for the query string. This parameter can only
             be used when the `q` query string parameter is specified.
-        :param default_operator: The default operator for query string query: `AND` or
-            `OR`.
+        :param default_operator: The default operator for query string query: `and` or
+            `or`.
         :param df: Field to use as default where no field prefix is given in the query
             string. This parameter can only be used when the `q` query string parameter
             is specified.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
             expressions match hidden data streams. Supports comma-separated values, such
-            as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+            as `open,hidden`.
         :param explain: If `true`, the response returns detailed information if an error
             has occurred.
         :param ignore_unavailable: If `false`, the request returns an error if it targets
diff -pruN 8.17.2-2/elasticsearch/_sync/client/inference.py 9.2.0-1/elasticsearch/_sync/client/inference.py
--- 8.17.2-2/elasticsearch/_sync/client/inference.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/inference.py	2025-10-28 16:50:53.000000000 +0000
@@ -25,6 +25,70 @@ from .utils import SKIP_IN_PATH, _quote,
 
 class InferenceClient(NamespacedClient):
 
+    @_rewrite_parameters(
+        body_fields=("input", "task_settings"),
+    )
+    def completion(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform completion inference on the service</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The inference Id
+        :param input: Inference input. Either a string or an array of strings.
+        :param task_settings: Optional task settings
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/completion/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.completion",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters()
     def delete(
         self,
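The new `completion` helper added above can be called roughly as follows, assuming an inference endpoint named `my-completion-endpoint` already exists; connection details are placeholders.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection details

# Run a completion task against an existing inference endpoint.
resp = client.inference.completion(
    inference_id="my-completion-endpoint",
    input="Summarize the rollover API in one sentence.",
    timeout="30s",
)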
@@ -33,7 +97,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         dry_run: t.Optional[bool] = None,
@@ -49,14 +119,14 @@ class InferenceClient(NamespacedClient):
           <p>Delete an inference endpoint</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete>`_
 
-        :param inference_id: The inference Id
+        :param inference_id: The inference identifier.
         :param task_type: The task type
-        :param dry_run: When true, the endpoint is not deleted, and a list of ingest
-            processors which reference this endpoint is returned
+        :param dry_run: When true, the endpoint is not deleted and a list of ingest processors
+            which reference this endpoint is returned.
         :param force: When true, the inference endpoint is forcefully deleted even if
-            it is still being used by ingest processors or semantic text fields
+            it is still being used by ingest processors or semantic text fields.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -102,7 +172,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         inference_id: t.Optional[str] = None,
@@ -117,7 +193,7 @@ class InferenceClient(NamespacedClient):
           <p>Get an inference endpoint</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get>`_
 
         :param task_type: The task type
         :param inference_id: The inference Id
@@ -155,7 +231,7 @@ class InferenceClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("input", "query", "task_settings"),
+        body_fields=("input", "input_type", "query", "task_settings"),
     )
     def inference(
         self,
@@ -165,12 +241,19 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        input_type: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
         query: t.Optional[str] = None,
         task_settings: t.Optional[t.Any] = None,
@@ -180,18 +263,39 @@ class InferenceClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Perform inference on the service</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/post-inference-api.html>`_
-
-        :param inference_id: The inference Id
-        :param input: Inference input. Either a string or an array of strings.
-        :param task_type: The task type
-        :param query: Query input, required for rerank task. Not required for other tasks.
-        :param task_settings: Optional task settings
-        :param timeout: Specifies the amount of time to wait for the inference request
-            to complete.
+          <p>Perform inference on the service.</p>
+          <p>This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+          It returns a response with the results of the tasks.
+          The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.</p>
+          <p>For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.</p>
+          <blockquote>
+          <p>info
+          The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+          </blockquote>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The unique identifier for the inference endpoint.
+        :param input: The text on which you want to perform the inference task. It can
+            be a single string or an array. (Note: inference endpoints for the `completion`
+            task type currently only support a single string as input.)
+        :param task_type: The type of inference task that the model performs.
+        :param input_type: Specifies the input data type for the text embedding model.
+            The `input_type` parameter only applies to inference endpoints with the `text_embedding`
+            task type. Possible values include `SEARCH`, `INGEST`, `CLASSIFICATION`,
+            and `CLUSTERING`. Not all services support all values; unsupported values
+            will trigger a validation exception. Accepted values depend on the configured
+            inference service, so refer to the relevant service-specific documentation
+            for more info. (Note: the `input_type` parameter specified on the root level
+            of the request body takes precedence over the `input_type` parameter specified
+            in `task_settings`.)
+        :param query: The query input, which is required only for the `rerank` task.
+            It is not required for other tasks.
+        :param task_settings: Task settings for the individual inference request. These
+            settings are specific to the task type you specified and override the task
+            settings specified when initializing the service.
+        :param timeout: The amount of time to wait for the inference request to complete.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -224,15 +328,13 @@ class InferenceClient(NamespacedClient):
         if not __body:
             if input is not None:
                 __body["input"] = input
+            if input_type is not None:
+                __body["input_type"] = input_type
             if query is not None:
                 __body["query"] = query
             if task_settings is not None:
                 __body["task_settings"] = task_settings
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
@@ -255,33 +357,61 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Create an inference endpoint.
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+          <p>Create an inference endpoint.</p>
           <p>IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
           For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
           However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
+          <p>The following integrations are available through the inference API. You can find the available task types next to the integration name:</p>
+          <ul>
+          <li>AI21 (<code>chat_completion</code>, <code>completion</code>)</li>
+          <li>AlibabaCloud AI Search (<code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
+          <li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Amazon SageMaker (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
+          <li>Anthropic (<code>completion</code>)</li>
+          <li>Azure AI Studio (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Cohere (<code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>DeepSeek (<code>chat_completion</code>, <code>completion</code>)</li>
+          <li>Elasticsearch (<code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code> - this service is for built-in models and models uploaded through Eland)</li>
+          <li>ELSER (<code>sparse_embedding</code>)</li>
+          <li>Google AI Studio (<code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Google Vertex AI (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Hugging Face (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>JinaAI (<code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Llama (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+          <li>Mistral (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+          <li>OpenAI (<code>chat_completion</code>, <code>completion</code>, <code>text_embedding</code>)</li>
+          <li>VoyageAI (<code>rerank</code>, <code>text_embedding</code>)</li>
+          <li>Watsonx inference integration (<code>text_embedding</code>)</li>
+          </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put>`_
 
         :param inference_id: The inference Id
         :param inference_config:
-        :param task_type: The task type
+        :param task_type: The task type. Refer to the integration list in the API description
+            for the available task types.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
         """
         if inference_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'inference_id'")
@@ -312,6 +442,8 @@ class InferenceClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __body = inference_config if inference_config is not None else body
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
@@ -325,6 +457,2288 @@ class InferenceClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
+        body_fields=("service", "service_settings"),
+    )
+    def put_ai21(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["chat_completion", "completion"]],
+        ai21_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["ai21"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an AI21 inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>ai21</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param ai21_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `ai21`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `ai21` service.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if ai21_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'ai21_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "ai21_inference_id": _quote(ai21_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["ai21_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_ai21",
+            path_parts=__path_parts,
+        )
+
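The remaining service-specific `put_*` helpers follow the same shape as `put_ai21` above; a hedged usage sketch with placeholder credentials and model name:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection details

resp = client.inference.put_ai21(
    task_type="chat_completion",
    ai21_inference_id="my-ai21-endpoint",
    service="ai21",
    service_settings={"api_key": "...", "model_id": "jamba-mini"},  # placeholder settings
)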
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_alibabacloud(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
+        ],
+        alibabacloud_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an AlibabaCloud AI Search inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>alibabacloud-ai-search</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param alibabacloud_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `alibabacloud-ai-search`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `alibabacloud-ai-search` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if alibabacloud_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'alibabacloud_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "alibabacloud_inference_id": _quote(alibabacloud_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_alibabacloud",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_amazonbedrock(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        amazonbedrock_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Amazon Bedrock inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>amazonbedrock</code> service.</p>
+          <blockquote>
+          <p>info
+          You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.</p>
+          </blockquote>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param amazonbedrock_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `amazonbedrock`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `amazonbedrock` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if amazonbedrock_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'amazonbedrock_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_amazonbedrock",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_amazonsagemaker(
+        self,
+        *,
+        task_type: t.Union[
+            str,
+            t.Literal[
+                "chat_completion",
+                "completion",
+                "rerank",
+                "sparse_embedding",
+                "text_embedding",
+            ],
+        ],
+        amazonsagemaker_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Amazon SageMaker inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>amazon_sagemaker</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param amazonsagemaker_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `amazon_sagemaker`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `amazon_sagemaker` service and `service_settings.api`
+            you specified.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type and `service_settings.api` you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if amazonsagemaker_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'amazonsagemaker_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_amazonsagemaker",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_anthropic(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion"]],
+        anthropic_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Anthropic inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>anthropic</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic>`_
+
+        :param task_type: The task type. The only valid task type for the model to perform
+            is `completion`.
+        :param anthropic_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `anthropic`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `anthropic` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if anthropic_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'anthropic_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "anthropic_inference_id": _quote(anthropic_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_anthropic",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_azureaistudio(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]],
+        azureaistudio_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Azure AI studio inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>azureaistudio</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param azureaistudio_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `azureaistudio`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `azureaistudio` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
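+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the Azure AI Studio integration)::
+
+            client.inference.put_azureaistudio(
+                task_type="completion",
+                azureaistudio_inference_id="my-azureaistudio-endpoint",
+                service="azureaistudio",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "target": "<target-uri>",
+                    "provider": "openai",
+                    "endpoint_type": "token",
+                },
+            )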
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if azureaistudio_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'azureaistudio_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "azureaistudio_inference_id": _quote(azureaistudio_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_azureaistudio",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_azureopenai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        azureopenai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Azure OpenAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>azureopenai</code> service.</p>
+          <p>The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:</p>
+          <ul>
+          <li><a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models">GPT-4 and GPT-4 Turbo models</a></li>
+          <li><a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35">GPT-3.5</a></li>
+          </ul>
+          <p>The list of embeddings models that you can choose from in your deployment can be found in the <a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings">Azure models documentation</a>.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+            NOTE: The `chat_completion` task type only supports streaming and only through
+            the _stream API.
+        :param azureopenai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `azureopenai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `azureopenai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
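+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the Azure OpenAI integration)::
+
+            client.inference.put_azureopenai(
+                task_type="text_embedding",
+                azureopenai_inference_id="my-azureopenai-endpoint",
+                service="azureopenai",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "resource_name": "<resource-name>",
+                    "deployment_id": "<deployment-id>",
+                    "api_version": "2024-02-01",
+                },
+            )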
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if azureopenai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'azureopenai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "azureopenai_inference_id": _quote(azureopenai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_azureopenai",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_cohere(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]],
+        cohere_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Cohere inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>cohere</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param cohere_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `cohere`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `cohere` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
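+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the Cohere integration)::
+
+            client.inference.put_cohere(
+                task_type="text_embedding",
+                cohere_inference_id="my-cohere-endpoint",
+                service="cohere",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "model_id": "embed-english-v3.0",
+                },
+            )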
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if cohere_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'cohere_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "cohere_inference_id": _quote(cohere_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_cohere",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_contextualai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["rerank"]],
+        contextualai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["contextualai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Contextual AI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>contextualai</code> service.</p>
+          <p>To review the available <code>rerank</code> models, refer to <a href="https://docs.contextual.ai/api-reference/rerank/rerank#body-model">https://docs.contextual.ai/api-reference/rerank/rerank#body-model</a>.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param contextualai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `contextualai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `contextualai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
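+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are assumptions only, so check
+        the Contextual AI service documentation for the exact fields)::
+
+            client.inference.put_contextualai(
+                task_type="rerank",
+                contextualai_inference_id="my-contextualai-endpoint",
+                service="contextualai",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "model_id": "<rerank-model>",
+                },
+            )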
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if contextualai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'contextualai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "contextualai_inference_id": _quote(contextualai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["contextualai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_contextualai",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_custom(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"]
+        ],
+        custom_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["custom"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a custom inference endpoint.</p>
+          <p>The custom service gives you more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations.
+          The custom service gives you the ability to define the headers, URL, query parameters, request body, and secrets.
+          The custom service supports template replacement, which enables you to define templates that are replaced with the values associated with their keys.
+          Templates are portions of a string that start with <code>${</code> and end with <code>}</code>.
+          The parameters <code>secret_parameters</code> and <code>task_settings</code> are checked for keys for template replacement. Template replacement is supported in the <code>request</code>, <code>headers</code>, <code>url</code>, and <code>query_parameters</code>.
+          If the definition (key) is not found for a template, an error message is returned.
+          For example, consider an endpoint definition like the following:</p>
+          <pre><code>PUT _inference/text_embedding/test-text-embedding
+          {
+            &quot;service&quot;: &quot;custom&quot;,
+            &quot;service_settings&quot;: {
+               &quot;secret_parameters&quot;: {
+                    &quot;api_key&quot;: &quot;&lt;some api key&gt;&quot;
+               },
+               &quot;url&quot;: &quot;...endpoints.huggingface.cloud/v1/embeddings&quot;,
+               &quot;headers&quot;: {
+                   &quot;Authorization&quot;: &quot;Bearer ${api_key}&quot;,
+                   &quot;Content-Type&quot;: &quot;application/json&quot;
+               },
+               &quot;request&quot;: &quot;{\\&quot;input\\&quot;: ${input}}&quot;,
+               &quot;response&quot;: {
+                   &quot;json_parser&quot;: {
+                       &quot;text_embeddings&quot;:&quot;$.data[*].embedding[*]&quot;
+                   }
+               }
+            }
+          }
+          </code></pre>
+          <p>To replace <code>${api_key}</code> the <code>secret_parameters</code> and <code>task_settings</code> are checked for a key named <code>api_key</code>.</p>
+          <blockquote>
+          <p>info
+          Templates should not be surrounded by quotes.</p>
+          </blockquote>
+          <p>Pre-defined templates:</p>
+          <ul>
+          <li><code>${input}</code> refers to the array of input strings that comes from the <code>input</code> field of the subsequent inference requests.</li>
+          <li><code>${input_type}</code> refers to the input type translation values.</li>
+          <li><code>${query}</code> refers to the query field used specifically for reranking tasks.</li>
+          <li><code>${top_n}</code> refers to the <code>top_n</code> field available when performing rerank requests.</li>
+          <li><code>${return_documents}</code> refers to the <code>return_documents</code> field available when performing rerank requests.</li>
+          </ul>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param custom_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `custom`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `custom` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
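+
+        The endpoint definition shown in the example above can be expressed with this
+        method as follows (`client` is assumed to be an `Elasticsearch` client
+        instance; all values are placeholders taken from that example)::
+
+            client.inference.put_custom(
+                task_type="text_embedding",
+                custom_inference_id="test-text-embedding",
+                service="custom",
+                service_settings={
+                    "secret_parameters": {"api_key": "<some api key>"},
+                    "url": "...endpoints.huggingface.cloud/v1/embeddings",
+                    "headers": {
+                        "Authorization": "Bearer ${api_key}",
+                        "Content-Type": "application/json",
+                    },
+                    "request": '{"input": ${input}}',
+                    "response": {
+                        "json_parser": {"text_embeddings": "$.data[*].embedding[*]"}
+                    },
+                },
+            )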
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if custom_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'custom_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "custom_inference_id": _quote(custom_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["custom_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_custom",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    def put_deepseek(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["chat_completion", "completion"]],
+        deepseek_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a DeepSeek inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>deepseek</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param deepseek_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `deepseek`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `deepseek` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
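+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the DeepSeek integration)::
+
+            client.inference.put_deepseek(
+                task_type="chat_completion",
+                deepseek_inference_id="my-deepseek-endpoint",
+                service="deepseek",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "model_id": "deepseek-chat",
+                },
+            )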
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if deepseek_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'deepseek_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "deepseek_inference_id": _quote(deepseek_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["deepseek_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_deepseek",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_elasticsearch(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["rerank", "sparse_embedding", "text_embedding"]
+        ],
+        elasticsearch_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an Elasticsearch inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>elasticsearch</code> service.</p>
+          <blockquote>
+          <p>info
+          Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create endpoints with the API if you want to customize the settings.</p>
+          </blockquote>
+          <p>If you use the ELSER or the E5 model through the <code>elasticsearch</code> service, the API request will automatically download and deploy the model if it isn't downloaded yet.</p>
+          <blockquote>
+          <p>info
+          You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.</p>
+          </blockquote>
+          <p>After creating the endpoint, wait for the model deployment to complete before using it.
+          To verify the deployment status, use the get trained model statistics API.
+          Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param elasticsearch_inference_id: The unique identifier of the inference endpoint.
+            It must not match the `model_id`.
+        :param service: The type of service supported for the specified task type. In
+            this case, `elasticsearch`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `elasticsearch` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
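+
+        A minimal usage sketch deploying the built-in E5 model (`client` is assumed
+        to be an `Elasticsearch` client instance; the `service_settings` keys shown
+        are illustrative assumptions for the `elasticsearch` service)::
+
+            client.inference.put_elasticsearch(
+                task_type="text_embedding",
+                elasticsearch_inference_id="my-e5-endpoint",
+                service="elasticsearch",
+                service_settings={
+                    "model_id": ".multilingual-e5-small",
+                    "num_allocations": 1,
+                    "num_threads": 1,
+                },
+                timeout="5m",
+            )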
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if elasticsearch_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'elasticsearch_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "elasticsearch_inference_id": _quote(elasticsearch_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_elasticsearch",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    def put_elser(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["sparse_embedding"]],
+        elser_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["elser"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an ELSER inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>elser</code> service.
+          You can also deploy ELSER by using the Elasticsearch inference integration.</p>
+          <blockquote>
+          <p>info
+          Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint with the API if you want to customize the settings.</p>
+          </blockquote>
+          <p>The API request will automatically download and deploy the ELSER model if it isn't already downloaded.</p>
+          <blockquote>
+          <p>info
+          You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.</p>
+          </blockquote>
+          <p>After creating the endpoint, wait for the model deployment to complete before using it.
+          To verify the deployment status, use the get trained model statistics API.
+          Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
+          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param elser_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `elser`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `elser` service.
+        :param chunking_settings: The chunking configuration object. Note that for ELSER
+            endpoints, the max_chunk_size may not exceed `300`.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
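+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the ELSER service)::
+
+            client.inference.put_elser(
+                task_type="sparse_embedding",
+                elser_inference_id="my-elser-endpoint",
+                service="elser",
+                service_settings={"num_allocations": 1, "num_threads": 1},
+                timeout="5m",
+            )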
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if elser_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'elser_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "elser_inference_id": _quote(elser_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_elser",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    def put_googleaistudio(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["completion", "text_embedding"]],
+        googleaistudio_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Google AI Studio inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>googleaistudio</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param googleaistudio_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `googleaistudio`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `googleaistudio` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
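+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the Google AI Studio integration)::
+
+            client.inference.put_googleaistudio(
+                task_type="completion",
+                googleaistudio_inference_id="my-googleaistudio-endpoint",
+                service="googleaistudio",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "model_id": "gemini-1.5-flash",
+                },
+            )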
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if googleaistudio_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'googleaistudio_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "googleaistudio_inference_id": _quote(googleaistudio_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_googleaistudio",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_googlevertexai(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"]
+        ],
+        googlevertexai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Google Vertex AI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>googlevertexai</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param googlevertexai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `googlevertexai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `googlevertexai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
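+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the Google Vertex AI integration)::
+
+            client.inference.put_googlevertexai(
+                task_type="text_embedding",
+                googlevertexai_inference_id="my-googlevertexai-endpoint",
+                service="googlevertexai",
+                service_settings={
+                    "service_account_json": "<service-account-json>",
+                    "model_id": "text-embedding-004",
+                    "location": "<location>",
+                    "project_id": "<project-id>",
+                },
+            )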
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if googlevertexai_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'googlevertexai_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "googlevertexai_inference_id": _quote(googlevertexai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_googlevertexai",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_hugging_face(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"]
+        ],
+        huggingface_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Hugging Face inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>hugging_face</code> service.
+          Supported tasks include: <code>text_embedding</code>, <code>completion</code>, and <code>chat_completion</code>.</p>
+          <p>To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint.
+          Select a model that supports the task you intend to use.</p>
+          <p>For Elastic's <code>text_embedding</code> task:
+          The selected model must support the <code>Sentence Embeddings</code> task. On the new endpoint creation page, select the <code>Sentence Embeddings</code> task under the <code>Advanced Configuration</code> section.
+          After the endpoint has initialized, copy the generated endpoint URL.
+          Recommended models for <code>text_embedding</code> task:</p>
+          <ul>
+          <li><code>all-MiniLM-L6-v2</code></li>
+          <li><code>all-MiniLM-L12-v2</code></li>
+          <li><code>all-mpnet-base-v2</code></li>
+          <li><code>e5-base-v2</code></li>
+          <li><code>e5-small-v2</code></li>
+          <li><code>multilingual-e5-base</code></li>
+          <li><code>multilingual-e5-small</code></li>
+          </ul>
+          <p>For Elastic's <code>chat_completion</code> and <code>completion</code> tasks:
+          The selected model must support the <code>Text Generation</code> task and expose the OpenAI API. Hugging Face supports both serverless and dedicated endpoints for <code>Text Generation</code>. When creating a dedicated endpoint, select the <code>Text Generation</code> task.
+          After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and that its URL includes the <code>/v1/chat/completions</code> path. Then copy the full endpoint URL for use.
+          Recommended models for <code>chat_completion</code> and <code>completion</code> tasks:</p>
+          <ul>
+          <li><code>Mistral-7B-Instruct-v0.2</code></li>
+          <li><code>QwQ-32B</code></li>
+          <li><code>Phi-3-mini-128k-instruct</code></li>
+          </ul>
+          <p>For Elastic's <code>rerank</code> task:
+          The selected model must support the <code>sentence-ranking</code> task and expose the OpenAI API.
+          Hugging Face currently supports only dedicated (not serverless) endpoints for <code>Rerank</code>.
+          After the endpoint is initialized, copy the full endpoint URL for use.
+          Tested models for <code>rerank</code> task:</p>
+          <ul>
+          <li><code>bge-reranker-base</code></li>
+          <li><code>jina-reranker-v1-turbo-en-GGUF</code></li>
+          </ul>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param huggingface_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `hugging_face`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `hugging_face` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
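+
+        A minimal usage sketch (`client` is assumed to be an `Elasticsearch` client
+        instance; the `service_settings` keys shown are illustrative assumptions for
+        the Hugging Face integration, with `url` being the endpoint URL copied from
+        the Hugging Face Inference Endpoints page as described above)::
+
+            client.inference.put_hugging_face(
+                task_type="text_embedding",
+                huggingface_inference_id="my-hugging-face-endpoint",
+                service="hugging_face",
+                service_settings={
+                    "api_key": "<api-key>",
+                    "url": "<endpoint-url>",
+                },
+            )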
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if huggingface_inference_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for parameter 'huggingface_inference_id'"
+            )
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "huggingface_inference_id": _quote(huggingface_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_hugging_face",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_jinaai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+        jinaai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a JinaAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>jinaai</code> service.</p>
+          <p>To review the available <code>rerank</code> models, refer to <a href="https://jina.ai/reranker">https://jina.ai/reranker</a>.
+          To review the available <code>text_embedding</code> models, refer to <a href="https://jina.ai/embeddings/">https://jina.ai/embeddings/</a>.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param jinaai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `jinaai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `jinaai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if jinaai_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'jinaai_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "jinaai_inference_id": _quote(jinaai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_jinaai",
+            path_parts=__path_parts,
+        )
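# Illustrative sketch for put_jinaai() above, here for the rerank task type. The endpoint
# ID and the service_settings entries (api_key, model_id) are assumptions; see the JinaAI
# service documentation for supported models and settings.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.put_jinaai(
    task_type="rerank",
    jinaai_inference_id="my-jinaai-rerank",
    service="jinaai",
    service_settings={
        "api_key": "<jina-api-key>",                        # placeholder
        "model_id": "jina-reranker-v2-base-multilingual",   # assumed model name
    },
)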
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    def put_llama(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        llama_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["llama"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Llama inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>llama</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-llama>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param llama_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `llama`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `llama` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if llama_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'llama_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "llama_inference_id": _quote(llama_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["llama_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_llama",
+            path_parts=__path_parts,
+        )
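# Illustrative sketch for put_llama() above. The service_settings keys (url, model_id)
# are assumptions; check the Llama service documentation for the settings your Llama
# deployment expects.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.put_llama(
    task_type="completion",
    llama_inference_id="my-llama-completion",
    service="llama",
    service_settings={
        "url": "http://localhost:8321/v1/openai/v1",   # placeholder server URL
        "model_id": "llama3.2:3b",                     # placeholder model identifier
    },
)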
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings", "chunking_settings"),
+    )
+    def put_mistral(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        mistral_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Mistral inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>mistral</code> service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param mistral_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `mistral`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `mistral` service.
+        :param chunking_settings: The chunking configuration object.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if mistral_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'mistral_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "mistral_inference_id": _quote(mistral_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_mistral",
+            path_parts=__path_parts,
+        )
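# Illustrative sketch for put_mistral() above. The service_settings keys (api_key, model)
# are assumptions; refer to the Mistral service documentation for the exact settings.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.put_mistral(
    task_type="text_embedding",
    mistral_inference_id="my-mistral-embeddings",
    service="mistral",
    service_settings={
        "api_key": "<mistral-api-key>",   # placeholder
        "model": "mistral-embed",         # assumed model name
    },
)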
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_openai(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        openai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["openai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create an OpenAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>openai</code> service or <code>openai</code>-compatible APIs.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+            NOTE: The `chat_completion` task type only supports streaming and only through
+            the _stream API.
+        :param openai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `openai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `openai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if openai_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'openai_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "openai_inference_id": _quote(openai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_openai",
+            path_parts=__path_parts,
+        )
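# Illustrative sketch for put_openai() above. The service_settings keys (api_key,
# model_id) are assumptions; refer to the OpenAI service documentation for details.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.put_openai(
    task_type="text_embedding",
    openai_inference_id="my-openai-embeddings",
    service="openai",
    service_settings={
        "api_key": "<openai-api-key>",          # placeholder
        "model_id": "text-embedding-3-small",   # assumed model name
    },
)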
+
+    @_rewrite_parameters(
+        body_fields=(
+            "service",
+            "service_settings",
+            "chunking_settings",
+            "task_settings",
+        ),
+    )
+    def put_voyageai(
+        self,
+        *,
+        task_type: t.Union[str, t.Literal["rerank", "text_embedding"]],
+        voyageai_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a VoyageAI inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>voyageai</code> service.</p>
+          <p>Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param voyageai_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `voyageai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `voyageai` service.
+        :param chunking_settings: The chunking configuration object.
+        :param task_settings: Settings to configure the inference task. These settings
+            are specific to the task type you specified.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if voyageai_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'voyageai_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "voyageai_inference_id": _quote(voyageai_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+            if chunking_settings is not None:
+                __body["chunking_settings"] = chunking_settings
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_voyageai",
+            path_parts=__path_parts,
+        )
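# Illustrative sketch for put_voyageai() above. The service_settings keys are assumptions;
# refer to the VoyageAI service documentation for the exact settings.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.put_voyageai(
    task_type="text_embedding",
    voyageai_inference_id="my-voyageai-embeddings",
    service="voyageai",
    service_settings={
        "api_key": "<voyage-api-key>",   # placeholder
        "model_id": "voyage-3-lite",     # assumed model name
    },
)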
+
+    @_rewrite_parameters(
+        body_fields=("service", "service_settings"),
+    )
+    def put_watsonx(
+        self,
+        *,
+        task_type: t.Union[
+            str, t.Literal["chat_completion", "completion", "text_embedding"]
+        ],
+        watsonx_inference_id: str,
+        service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None,
+        service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Create a Watsonx inference endpoint.</p>
+          <p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
+          You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
+          You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx>`_
+
+        :param task_type: The type of the inference task that the model will perform.
+        :param watsonx_inference_id: The unique identifier of the inference endpoint.
+        :param service: The type of service supported for the specified task type. In
+            this case, `watsonxai`.
+        :param service_settings: Settings used to install the inference model. These
+            settings are specific to the `watsonxai` service.
+        :param timeout: Specifies the amount of time to wait for the inference endpoint
+            to be created.
+        """
+        if task_type in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'task_type'")
+        if watsonx_inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'watsonx_inference_id'")
+        if service is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service'")
+        if service_settings is None and body is None:
+            raise ValueError("Empty value passed for parameter 'service_settings'")
+        __path_parts: t.Dict[str, str] = {
+            "task_type": _quote(task_type),
+            "watsonx_inference_id": _quote(watsonx_inference_id),
+        }
+        __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if service is not None:
+                __body["service"] = service
+            if service_settings is not None:
+                __body["service_settings"] = service_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "PUT",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.put_watsonx",
+            path_parts=__path_parts,
+        )
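# Illustrative sketch for put_watsonx() above. All service_settings keys below are
# assumptions about what the watsonxai service needs (credentials, URL, project,
# model, API version); check the service documentation before use.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="my-watsonx-embeddings",
    service="watsonxai",
    service_settings={
        "api_key": "<ibm-cloud-api-key>",            # placeholder
        "url": "<watsonx-endpoint-url>",             # placeholder
        "project_id": "<watsonx-project-id>",        # placeholder
        "model_id": "ibm/slate-30m-english-rtrvr",   # assumed model name
        "api_version": "2024-05-02",                 # assumed API version
    },
)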
+
+    @_rewrite_parameters(
+        body_fields=("input", "query", "task_settings"),
+    )
+    def rerank(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        query: t.Optional[str] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform reranking inference on the service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The unique identifier for the inference endpoint.
+        :param input: The text on which you want to perform the inference task. It can
+            be a single string or an array. NOTE: Inference endpoints for the `completion`
+            task type currently only support a single string as input.
+        :param query: Query input.
+        :param task_settings: Task settings for the individual inference request. These
+            settings are specific to the task type you specified and override the task
+            settings specified when initializing the service.
+        :param timeout: The amount of time to wait for the inference request to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        if query is None and body is None:
+            raise ValueError("Empty value passed for parameter 'query'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/rerank/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if query is not None:
+                __body["query"] = query
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.rerank",
+            path_parts=__path_parts,
+        )
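# Quick sketch of the rerank() helper above; "my-jinaai-rerank" assumes an endpoint
# created earlier with one of the put_* APIs.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

resp = client.inference.rerank(
    inference_id="my-jinaai-rerank",   # placeholder endpoint ID
    query="where is the Eiffel Tower?",
    input=[
        "The Eiffel Tower is in Paris.",
        "Elasticsearch is a distributed search engine.",
    ],
)
print(resp)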
+
+    @_rewrite_parameters(
+        body_fields=("input", "task_settings"),
+    )
+    def sparse_embedding(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform sparse embedding inference on the service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The unique identifier for the inference endpoint.
+        :param input: Inference input. Either a string or an array of strings.
+        :param task_settings: Optional task settings.
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.sparse_embedding",
+            path_parts=__path_parts,
+        )
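# Quick sketch of sparse_embedding() above; ".elser-2-elasticsearch" is only an example
# endpoint ID and is an assumption about the endpoints available on your cluster.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

resp = client.inference.sparse_embedding(
    inference_id=".elser-2-elasticsearch",   # placeholder endpoint ID
    input="The quick brown fox jumps over the lazy dog",
)
print(resp)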
+
+    @_rewrite_parameters(
+        body_fields=("input", "input_type", "task_settings"),
+    )
+    def text_embedding(
+        self,
+        *,
+        inference_id: str,
+        input: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        input_type: t.Optional[str] = None,
+        pretty: t.Optional[bool] = None,
+        task_settings: t.Optional[t.Any] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Perform text embedding inference on the service.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference>`_
+
+        :param inference_id: The unique identifier for the inference endpoint.
+        :param input: Inference input. Either a string or an array of strings.
+        :param input_type: The input data type for the text embedding model. Possible
+            values include `SEARCH`, `INGEST`, `CLASSIFICATION`, and `CLUSTERING`. Not
+            all services support all values; unsupported values will trigger a validation
+            exception. Accepted values depend on the configured inference service; refer
+            to the relevant service-specific documentation for more info. NOTE: The
+            `input_type` parameter specified on the root level of the request body will
+            take precedence over the `input_type` parameter specified in `task_settings`.
+        :param task_settings: Optional task settings.
+        :param timeout: Specifies the amount of time to wait for the inference request
+            to complete.
+        """
+        if inference_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'inference_id'")
+        if input is None and body is None:
+            raise ValueError("Empty value passed for parameter 'input'")
+        __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)}
+        __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}'
+        __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        if not __body:
+            if input is not None:
+                __body["input"] = input
+            if input_type is not None:
+                __body["input_type"] = input_type
+            if task_settings is not None:
+                __body["task_settings"] = task_settings
+        __headers = {"accept": "application/json", "content-type": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            body=__body,
+            endpoint_id="inference.text_embedding",
+            path_parts=__path_parts,
+        )
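# Quick sketch of text_embedding() above with the optional input_type hint; the endpoint
# ID is a placeholder and input_type support depends on the configured service.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

resp = client.inference.text_embedding(
    inference_id="my-openai-embeddings",   # placeholder endpoint ID
    input=["first passage", "second passage"],
    input_type="INGEST",
)
print(resp)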
+
+    @_rewrite_parameters(
         body_name="inference_config",
     )
     def update(
@@ -336,7 +2750,13 @@ class InferenceClient(NamespacedClient):
         task_type: t.Optional[
             t.Union[
                 str,
-                t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"],
+                t.Literal[
+                    "chat_completion",
+                    "completion",
+                    "rerank",
+                    "sparse_embedding",
+                    "text_embedding",
+                ],
             ]
         ] = None,
         error_trace: t.Optional[bool] = None,
@@ -354,7 +2774,7 @@ class InferenceClient(NamespacedClient):
           However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-inference-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update>`_
 
         :param inference_id: The unique identifier of the inference endpoint.
         :param inference_config:
@@ -392,7 +2812,7 @@ class InferenceClient(NamespacedClient):
         __body = inference_config if inference_config is not None else body
         __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
-            "POST",
+            "PUT",
             __path,
             params=__query,
             headers=__headers,
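# Hedged sketch of the update() call whose HTTP verb changes from POST to PUT above;
# the endpoint ID and the inference_config body are placeholders, not verified settings.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.inference.update(
    inference_id="my-openai-embeddings",   # placeholder endpoint ID
    inference_config={
        "service_settings": {"api_key": "<rotated-api-key>"},   # placeholder
    },
)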
diff -pruN 8.17.2-2/elasticsearch/_sync/client/ingest.py 9.2.0-1/elasticsearch/_sync/client/ingest.py
--- 8.17.2-2/elasticsearch/_sync/client/ingest.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/ingest.py	2025-10-28 16:50:53.000000000 +0000
@@ -40,18 +40,18 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete GeoIP database configurations.
-          Delete one or more IP geolocation database configurations.</p>
+          <p>Delete GeoIP database configurations.</p>
+          <p>Delete one or more IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-geoip-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database>`_
 
         :param id: A comma-separated list of geoip database configurations to delete
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
@@ -98,7 +98,7 @@ class IngestClient(NamespacedClient):
           <p>Delete IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-ip-location-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database>`_
 
         :param id: A comma-separated list of IP location database configurations.
         :param master_timeout: The period to wait for a connection to the master node.
@@ -155,7 +155,7 @@ class IngestClient(NamespacedClient):
           Delete one or more ingest pipelines.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline>`_
 
         :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the
             request. To delete all ingest pipelines in a cluster, use a value of `*`.
@@ -208,7 +208,7 @@ class IngestClient(NamespacedClient):
           Get download statistics for GeoIP2 databases that are used with the GeoIP processor.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/geoip-processor.html>`_
+        `<https://www.elastic.co/docs/reference/enrich-processor/geoip-processor>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ingest/geoip/stats"
@@ -239,24 +239,20 @@ class IngestClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get GeoIP database configurations.
-          Get information about one or more IP geolocation database configurations.</p>
+          <p>Get GeoIP database configurations.</p>
+          <p>Get information about one or more IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-geoip-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database>`_
 
-        :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard
-            (`*`) expressions are supported. To get all database configurations, omit
-            this parameter or use `*`.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
+        :param id: A comma-separated list of database configuration IDs to retrieve.
+            Wildcard (`*`) expressions are supported. To get all database configurations,
+            omit this parameter or use `*`.
         """
         __path_parts: t.Dict[str, str]
         if id not in SKIP_IN_PATH:
@@ -272,8 +268,6 @@ class IngestClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -294,7 +288,6 @@ class IngestClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -303,15 +296,11 @@ class IngestClient(NamespacedClient):
           <p>Get IP geolocation database configurations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ip-location-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database>`_
 
         :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard
             (`*`) expressions are supported. To get all database configurations, omit
             this parameter or use `*`.
-        :param master_timeout: The period to wait for a connection to the master node.
-            If no response is received before the timeout expires, the request fails
-            and returns an error. A value of `-1` indicates that the request should never
-            time out.
         """
         __path_parts: t.Dict[str, str]
         if id not in SKIP_IN_PATH:
@@ -327,8 +316,6 @@ class IngestClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -356,12 +343,12 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get pipelines.
-          Get information about one or more ingest pipelines.
+          <p>Get pipelines.</p>
+          <p>Get information about one or more ingest pipelines.
           This API returns a local reference of the pipeline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline>`_
 
         :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions
             are supported. To get all ingest pipelines, omit this parameter or use `*`.
@@ -418,7 +405,7 @@ class IngestClient(NamespacedClient):
           A grok pattern is like a regular expression that supports aliased expressions that can be reused.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/grok-processor.html>`_
+        `<https://www.elastic.co/docs/reference/enrich-processor/grok-processor>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ingest/processor/grok"
@@ -461,11 +448,11 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create or update a GeoIP database configuration.
-          Refer to the create or update IP geolocation database configuration API.</p>
+          <p>Create or update a GeoIP database configuration.</p>
+          <p>Refer to the create or update IP geolocation database configuration API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-geoip-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database>`_
 
         :param id: ID of the database configuration to create or update.
         :param maxmind: The configuration necessary to identify which IP geolocation
@@ -540,7 +527,7 @@ class IngestClient(NamespacedClient):
           <p>Create or update an IP geolocation database configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-ip-location-database-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database>`_
 
         :param id: The database configuration identifier.
         :param configuration:
@@ -593,6 +580,7 @@ class IngestClient(NamespacedClient):
         body_fields=(
             "deprecated",
             "description",
+            "field_access_pattern",
             "meta",
             "on_failure",
             "processors",
@@ -607,6 +595,9 @@ class IngestClient(NamespacedClient):
         deprecated: t.Optional[bool] = None,
         description: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
+        field_access_pattern: t.Optional[
+            t.Union[str, t.Literal["classic", "flexible"]]
+        ] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         if_version: t.Optional[int] = None,
@@ -626,7 +617,7 @@ class IngestClient(NamespacedClient):
           Changes made using this API take effect immediately.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ingest.html>`_
+        `<https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines>`_
 
         :param id: ID of the ingest pipeline to create or update.
         :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated
@@ -634,6 +625,8 @@ class IngestClient(NamespacedClient):
             or updating a non-deprecated index template, Elasticsearch will emit a deprecation
             warning.
         :param description: Description of the ingest pipeline.
+        :param field_access_pattern: Controls how processors in this pipeline should
+            read and write data on a document's source.
         :param if_version: Required version for optimistic concurrency control for pipeline
             updates
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -680,6 +673,8 @@ class IngestClient(NamespacedClient):
                 __body["deprecated"] = deprecated
             if description is not None:
                 __body["description"] = description
+            if field_access_pattern is not None:
+                __body["field_access_pattern"] = field_access_pattern
             if meta is not None:
                 __body["_meta"] = meta
             if on_failure is not None:
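# Hedged sketch of put_pipeline() using the new field_access_pattern option added above;
# the pipeline ID and the set processor are illustrative only.
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")

client.ingest.put_pipeline(
    id="my-pipeline",
    description="Example pipeline using flexible field access",
    field_access_pattern="flexible",   # the other accepted value is "classic"
    processors=[
        {"set": {"field": "attributes.status", "value": "ingested"}},
    ],
)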
@@ -718,17 +713,17 @@ class IngestClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Simulate a pipeline.
-          Run an ingest pipeline against a set of provided documents.
+          <p>Simulate a pipeline.</p>
+          <p>Run an ingest pipeline against a set of provided documents.
           You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/simulate-pipeline-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate>`_
 
         :param docs: Sample documents to test in the pipeline.
-        :param id: Pipeline to test. If you don’t specify a `pipeline` in the request
+        :param id: The pipeline to test. If you don't specify a `pipeline` in the request
             body, this parameter is required.
-        :param pipeline: Pipeline to test. If you don’t specify the `pipeline` request
+        :param pipeline: The pipeline to test. If you don't specify the `pipeline` request
             path parameter, this parameter is required. If you specify both this and
             the request path parameter, the API only uses the request path parameter.
         :param verbose: If `true`, the response includes output data for each processor
diff -pruN 8.17.2-2/elasticsearch/_sync/client/license.py 9.2.0-1/elasticsearch/_sync/client/license.py
--- 8.17.2-2/elasticsearch/_sync/client/license.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/license.py	2025-10-28 16:50:53.000000000 +0000
@@ -32,17 +32,23 @@ class LicenseClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Delete the license.
-          When the license expires, your subscription level reverts to Basic.</p>
+          <p>Delete the license.</p>
+          <p>When the license expires, your subscription level reverts to Basic.</p>
           <p>If the operator privileges feature is enabled, only operator users can use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-license.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete>`_
+
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license"
@@ -53,8 +59,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -79,20 +89,25 @@ class LicenseClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get license information.
-          Get information about your Elastic license including its type, its status, when it was issued, and when it expires.</p>
-          <p>NOTE: If the master node is generating a new cluster state, the get license API may return a <code>404 Not Found</code> response.
+          <p>Get license information.</p>
+          <p>Get information about your Elastic license including its type, its status, when it was issued, and when it expires.</p>
+          <blockquote>
+          <p>info
+          If the master node is generating a new cluster state, the get license API may return a <code>404 Not Found</code> response.
           If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.</p>
+          </blockquote>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-license.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get>`_
 
         :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise
             license types. If `false`, this parameter returns platinum for both platinum
             and enterprise license types. This behavior is maintained for backwards compatibility.
             This parameter is deprecated and will always be set to true in 8.x.
-        :param local: Specifies whether to retrieve local information. The default value
-            is `false`, which means the information is retrieved from the master node.
+        :param local: Specifies whether to retrieve local information. From 9.2 onwards
+            the default value is `true`, which means the information is retrieved from
+            the responding node. In earlier versions the default is `false`, which means
+            the information is retrieved from the elected master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license"
@@ -134,7 +149,7 @@ class LicenseClient(NamespacedClient):
           <p>Get the basic license status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-basic-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/basic_status"
@@ -172,7 +187,7 @@ class LicenseClient(NamespacedClient):
           <p>Get the trial status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-trial-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/trial_status"
@@ -207,14 +222,16 @@ class LicenseClient(NamespacedClient):
         human: t.Optional[bool] = None,
         license: t.Optional[t.Mapping[str, t.Any]] = None,
         licenses: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Update the license.
-          You can update your license at runtime without shutting down your nodes.
+          <p>Update the license.</p>
+          <p>You can update your license at runtime without shutting down your nodes.
           License updates take effect immediately.
           If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.
           You must then re-submit the API request with the acknowledge parameter set to true.</p>
@@ -222,12 +239,15 @@ class LicenseClient(NamespacedClient):
           If the operator privileges feature is enabled, only operator users can use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-license.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post>`_
 
         :param acknowledge: Specifies whether you acknowledge the license changes.
         :param license:
         :param licenses: A sequence of one or more JSON documents containing the license
             information.
+        :param master_timeout: The period to wait for a connection to the master node.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license"
@@ -241,8 +261,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         if not __body:
             if license is not None:
                 __body["license"] = license
@@ -271,23 +295,28 @@ class LicenseClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Start a basic license.
-          Start an indefinite basic license, which gives access to all the basic features.</p>
+          <p>Start a basic license.</p>
+          <p>Start an indefinite basic license, which gives access to all the basic features.</p>
           <p>NOTE: In order to start a basic license, you must not currently have a basic license.</p>
           <p>If the basic license does not support all of the features that are available with your current license, however, you are notified in the response.
           You must then re-submit the API request with the <code>acknowledge</code> parameter set to <code>true</code>.</p>
           <p>To check the status of your basic license, use the get basic license API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-basic.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic>`_
 
         :param acknowledge: whether the user has acknowledged acknowledge messages (default:
             false)
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/start_basic"
@@ -300,8 +329,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -320,8 +353,9 @@ class LicenseClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        type_query_string: t.Optional[str] = None,
+        type: t.Optional[str] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -333,11 +367,12 @@ class LicenseClient(NamespacedClient):
           <p>To check the status of your trial, use the get trial status API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trial.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial>`_
 
         :param acknowledge: whether the user has acknowledged acknowledge messages (default:
             false)
-        :param type_query_string:
+        :param master_timeout: Period to wait for a connection to the master node.
+        :param type: The type of trial license to generate (default: "trial")
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_license/start_trial"
@@ -350,10 +385,12 @@ class LicenseClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if type_query_string is not None:
-            __query["type_query_string"] = type_query_string
+        if type is not None:
+            __query["type"] = type
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
diff -pruN 8.17.2-2/elasticsearch/_sync/client/logstash.py 9.2.0-1/elasticsearch/_sync/client/logstash.py
--- 8.17.2-2/elasticsearch/_sync/client/logstash.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/logstash.py	2025-10-28 16:50:53.000000000 +0000
@@ -43,7 +43,7 @@ class LogstashClient(NamespacedClient):
           If the request succeeds, you receive an empty response with an appropriate status code.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-delete-pipeline.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline>`_
 
         :param id: An identifier for the pipeline.
         """
@@ -87,7 +87,7 @@ class LogstashClient(NamespacedClient):
           Get pipelines that are used for Logstash Central Management.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-get-pipeline.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline>`_
 
         :param id: A comma-separated list of pipeline identifiers.
         """
@@ -139,9 +139,11 @@ class LogstashClient(NamespacedClient):
           If the specified pipeline exists, it is replaced.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/logstash-api-put-pipeline.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline>`_
 
-        :param id: An identifier for the pipeline.
+        :param id: An identifier for the pipeline. Pipeline IDs must begin with a letter
+            or underscore and contain only letters, underscores, hyphens, and
+            numbers.
         :param pipeline:
         """
         if id in SKIP_IN_PATH:
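The Logstash hunks only swap documentation URLs and expand the `id` constraint, so the call pattern itself is unchanged. A hedged sketch of the central-management round trip follows; the pipeline body is illustrative and not a complete pipeline document.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Pipeline IDs must start with a letter or underscore and use only letters,
# digits, underscores, and hyphens.
client.logstash.put_pipeline(
    id="my_pipeline",
    pipeline={
        "description": "illustrative pipeline managed from the Python client",
        "pipeline": "input { stdin {} } output { stdout {} }",
        # A full pipeline document also carries last_modified, pipeline_metadata,
        # pipeline_settings, and username; omitted here for brevity.
    },
)

client.logstash.get_pipeline(id="my_pipeline")
client.logstash.delete_pipeline(id="my_pipeline")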
diff -pruN 8.17.2-2/elasticsearch/_sync/client/migration.py 9.2.0-1/elasticsearch/_sync/client/migration.py
--- 8.17.2-2/elasticsearch/_sync/client/migration.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class MigrationClient(NamespacedClient):
           You are strongly recommended to use the Upgrade Assistant.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/migration-api-deprecation.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations>`_
 
         :param index: Comma-separated list of data streams or indices to check. Wildcard
             (*) expressions are supported.
@@ -94,7 +94,7 @@ class MigrationClient(NamespacedClient):
           You are strongly recommended to use the Upgrade Assistant.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_migration/system_features"
@@ -136,7 +136,7 @@ class MigrationClient(NamespacedClient):
           <p>TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/feature-migration-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_migration/system_features"
diff -pruN 8.17.2-2/elasticsearch/_sync/client/ml.py 9.2.0-1/elasticsearch/_sync/client/ml.py
--- 8.17.2-2/elasticsearch/_sync/client/ml.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/ml.py	2025-10-28 16:50:53.000000000 +0000
@@ -38,14 +38,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Clear trained model deployment cache.
-          Cache will be cleared on all nodes where the trained model is assigned.
+          <p>Clear trained model deployment cache.</p>
+          <p>Cache will be cleared on all nodes where the trained model is assigned.
           A trained model deployment may have an inference cache enabled.
           As requests are handled by each allocated node, their responses may be cached on that individual node.
           Calling this API clears the caches without restarting the deployment.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-trained-model-deployment-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache>`_
 
         :param model_id: The unique identifier of the trained model.
         """
@@ -93,14 +93,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Close anomaly detection jobs.
-          A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+          <p>Close anomaly detection jobs.</p>
+          <p>A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
           When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.
           If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.
           When a datafeed that has a specified end date stops, it automatically closes its associated job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-close-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, or a wildcard expression. You can close multiple anomaly detection
@@ -161,11 +161,11 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a calendar.
-          Removes all scheduled events from a calendar, then deletes it.</p>
+          <p>Delete a calendar.</p>
+          <p>Remove all scheduled events from a calendar, then delete it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-calendar.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         """
@@ -209,7 +209,7 @@ class MlClient(NamespacedClient):
           <p>Delete events from a calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-calendar-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param event_id: Identifier for the scheduled event. You can obtain this identifier
@@ -260,7 +260,7 @@ class MlClient(NamespacedClient):
           <p>Delete anomaly jobs from a calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-calendar-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param job_id: An identifier for the anomaly detection jobs. It can be a job
@@ -312,7 +312,7 @@ class MlClient(NamespacedClient):
           <p>Delete a data frame analytics job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job.
         :param force: If `true`, it deletes a job that is not stopped; this method is
@@ -363,7 +363,7 @@ class MlClient(NamespacedClient):
           <p>Delete a datafeed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -415,18 +415,18 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete expired ML data.
-          Deletes all job results, model snapshots and forecast data that have exceeded
+          <p>Delete expired ML data.</p>
+          <p>Delete all job results, model snapshots and forecast data that have exceeded
           their retention days period. Machine learning state documents that are not
           associated with any job are also deleted.
           You can limit the request to a single or set of anomaly detection jobs by
           using a job identifier, a group name, a comma-separated list of jobs, or a
           wildcard expression. You can delete expired data for all anomaly detection
-          jobs by using _all, by specifying * as the &lt;job_id&gt;, or by omitting the
-          &lt;job_id&gt;.</p>
+          jobs by using <code>_all</code>, by specifying <code>*</code> as the <code>&lt;job_id&gt;</code>, or by omitting the
+          <code>&lt;job_id&gt;</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-expired-data.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data>`_
 
         :param job_id: Identifier for an anomaly detection job. It can be a job identifier,
             a group name, or a wildcard expression.
@@ -485,12 +485,12 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a filter.
-          If an anomaly detection job references the filter, you cannot delete the
+          <p>Delete a filter.</p>
+          <p>If an anomaly detection job references the filter, you cannot delete the
           filter. You must update or delete the job before you can delete the filter.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         """
@@ -533,14 +533,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete forecasts from a job.
-          By default, forecasts are retained for 14 days. You can specify a
+          <p>Delete forecasts from a job.</p>
+          <p>By default, forecasts are retained for 14 days. You can specify a
           different retention period with the <code>expires_in</code> parameter in the forecast
           jobs API. The delete forecast API enables you to delete one or more
           forecasts before they expire.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-forecast.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param forecast_id: A comma-separated list of forecast identifiers. If you do
@@ -607,8 +607,8 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete an anomaly detection job.
-          All job configuration, model state and results are deleted.
+          <p>Delete an anomaly detection job.</p>
+          <p>All job configuration, model state and results are deleted.
           It is not currently possible to delete multiple jobs using wildcards or a
           comma separated list. If you delete a job that has a datafeed, the request
           first tries to delete the datafeed. This behavior is equivalent to calling
@@ -616,7 +616,7 @@ class MlClient(NamespacedClient):
           delete job request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param delete_user_annotations: Specifies whether annotations that have been
@@ -670,13 +670,13 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a model snapshot.
-          You cannot delete the active model snapshot. To delete that snapshot, first
+          <p>Delete a model snapshot.</p>
+          <p>You cannot delete the active model snapshot. To delete that snapshot, first
           revert to a different one. To identify the active model snapshot, refer to
           the <code>model_snapshot_id</code> in the results from the get jobs API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-delete-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: Identifier for the model snapshot.
@@ -719,19 +719,22 @@ class MlClient(NamespacedClient):
         force: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Delete an unreferenced trained model.
-          The request deletes a trained inference model that is not referenced by an ingest pipeline.</p>
+          <p>Delete an unreferenced trained model.</p>
+          <p>The request deletes a trained inference model that is not referenced by an ingest pipeline.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-trained-models.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model>`_
 
         :param model_id: The unique identifier of the trained model.
         :param force: Forcefully deletes a trained model that is referenced by ingest
             pipelines or has a started deployment.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         if model_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'model_id'")
@@ -748,6 +751,8 @@ class MlClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -772,13 +777,13 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a trained model alias.
-          This API deletes an existing model alias that refers to a trained model. If
+          <p>Delete a trained model alias.</p>
+          <p>This API deletes an existing model alias that refers to a trained model. If
           the model alias is missing or refers to a model other than the one identified
           by the <code>model_id</code>, this API returns an error.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-trained-models-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias>`_
 
         :param model_id: The trained model ID to which the model alias refers.
         :param model_alias: The model alias to delete.
@@ -833,13 +838,13 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Estimate job model memory usage.
-          Makes an estimation of the memory usage for an anomaly detection job model.
-          It is based on analysis configuration details for the job and cardinality
+          <p>Estimate job model memory usage.</p>
+          <p>Make an estimation of the memory usage for an anomaly detection job model.
+          The estimate is based on analysis configuration details for the job and cardinality
           estimates for the fields it references.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-apis.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory>`_
 
         :param analysis_config: For a list of the properties that you can specify in
             the `analysis_config` component of the body of this API.
@@ -904,14 +909,14 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Evaluate data frame analytics.
-          The API packages together commonly used evaluation metrics for various types
+          <p>Evaluate data frame analytics.</p>
+          <p>The API packages together commonly used evaluation metrics for various types
           of machine learning features. This has been designed for use on indexes
           created by data frame analytics. Evaluation requires both a ground truth
           field and an analytics result field to be present.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/evaluate-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame>`_
 
         :param evaluation: Defines the type of evaluation you want to perform.
         :param index: Defines the `index` in which the evaluation will be performed.
@@ -985,8 +990,8 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Explain data frame analytics config.
-          This API provides explanations for a data frame analytics config that either
+          <p>Explain data frame analytics config.</p>
+          <p>This API provides explanations for a data frame analytics config that either
           exists already or one that has not been created yet. The following
           explanations are provided:</p>
           <ul>
@@ -996,7 +1001,7 @@ class MlClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/explain-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -1107,7 +1112,7 @@ class MlClient(NamespacedClient):
           analyzing further data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-flush-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param advance_time: Refer to the description for the `advance_time` query parameter.
@@ -1182,7 +1187,7 @@ class MlClient(NamespacedClient):
           based on historical data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-forecast.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast>`_
 
         :param job_id: Identifier for the anomaly detection job. The job must be open
             when you create a forecast; otherwise, an error occurs.
@@ -1268,7 +1273,7 @@ class MlClient(NamespacedClient):
           The API presents a chronological view of the records, grouped by bucket.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-bucket.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param timestamp: The timestamp of a single bucket result. If you do not specify
@@ -1366,7 +1371,7 @@ class MlClient(NamespacedClient):
           <p>Get info about events in calendars.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-calendar-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events>`_
 
         :param calendar_id: A string that uniquely identifies a calendar. You can get
             information for multiple calendars by using a comma-separated list of ids
@@ -1435,7 +1440,7 @@ class MlClient(NamespacedClient):
           <p>Get calendar configuration info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-calendar.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars>`_
 
         :param calendar_id: A string that uniquely identifies a calendar. You can get
             information for multiple calendars by using a comma-separated list of ids
@@ -1511,7 +1516,7 @@ class MlClient(NamespacedClient):
           <p>Get anomaly detection job results for categories.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-category.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param category_id: Identifier for the category, which is unique in the job.
@@ -1599,7 +1604,7 @@ class MlClient(NamespacedClient):
           wildcard expression.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. If you do not specify
             this option, the API returns information for the first hundred data frame
@@ -1671,10 +1676,10 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get data frame analytics jobs usage info.</p>
+          <p>Get data frame analytics job stats.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-dfanalytics-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats>`_
 
         :param id: Identifier for the data frame analytics job. If you do not specify
             this option, the API returns information for the first hundred data frame
@@ -1739,7 +1744,7 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get datafeeds usage info.
+          <p>Get datafeed stats.
           You can get statistics for multiple datafeeds in a single API request by
           using a comma-separated list of datafeeds or a wildcard expression. You can
           get statistics for all datafeeds by using <code>_all</code>, by specifying <code>*</code> as the
@@ -1748,7 +1753,7 @@ class MlClient(NamespacedClient):
           This API returns a maximum of 10,000 datafeeds.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-datafeed-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats>`_
 
         :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier
             or a wildcard expression. If you do not specify one of these options, the
@@ -1812,7 +1817,7 @@ class MlClient(NamespacedClient):
           This API returns a maximum of 10,000 datafeeds.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds>`_
 
         :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier
             or a wildcard expression. If you do not specify one of these options, the
@@ -1879,7 +1884,7 @@ class MlClient(NamespacedClient):
           You can get a single filter or all filters.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         :param from_: Skips the specified number of filters.
@@ -1947,7 +1952,7 @@ class MlClient(NamespacedClient):
           <code>influencer_field_name</code> is specified in the job configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-influencer.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param desc: If true, the results are sorted in descending order.
@@ -2028,10 +2033,10 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get anomaly detection jobs usage info.</p>
+          <p>Get anomaly detection job stats.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-job-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, a comma-separated list of jobs, or a wildcard expression. If
@@ -2095,7 +2100,7 @@ class MlClient(NamespacedClient):
           <code>_all</code>, by specifying <code>*</code> as the <code>&lt;job_id&gt;</code>, or by omitting the <code>&lt;job_id&gt;</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, or a wildcard expression. If you do not specify one of these
@@ -2161,7 +2166,7 @@ class MlClient(NamespacedClient):
           on each node, both within the JVM heap, and natively, outside of the JVM.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ml-memory.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats>`_
 
         :param node_id: The names of particular nodes in the cluster to target. For example,
             `nodeId1,nodeId2` or `ml:true`
@@ -2219,7 +2224,7 @@ class MlClient(NamespacedClient):
           <p>Get anomaly detection job model snapshot upgrade usage info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-job-model-snapshot-upgrade-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: A numerical character string that uniquely identifies the
@@ -2293,7 +2298,7 @@ class MlClient(NamespacedClient):
           <p>Get model snapshots info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: A numerical character string that uniquely identifies the
@@ -2385,7 +2390,7 @@ class MlClient(NamespacedClient):
         exclude_interim: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        overall_score: t.Optional[t.Union[float, str]] = None,
+        overall_score: t.Optional[float] = None,
         pretty: t.Optional[bool] = None,
         start: t.Optional[t.Union[str, t.Any]] = None,
         top_n: t.Optional[int] = None,
@@ -2413,7 +2418,7 @@ class MlClient(NamespacedClient):
           jobs' largest bucket span.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-overall-buckets.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets>`_
 
         :param job_id: Identifier for the anomaly detection job. It can be a job identifier,
             a group name, a comma-separated list of jobs or groups, or a wildcard expression.
@@ -2523,7 +2528,7 @@ class MlClient(NamespacedClient):
           number of detectors.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-get-record.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param desc: Refer to the description for the `desc` query parameter.
@@ -2611,7 +2616,6 @@ class MlClient(NamespacedClient):
                 ],
             ]
         ] = None,
-        include_model_definition: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
         size: t.Optional[int] = None,
         tags: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -2622,7 +2626,7 @@ class MlClient(NamespacedClient):
           <p>Get trained model configuration info.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-trained-models.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models>`_
 
         :param model_id: The unique identifier of the trained model or a model alias.
             You can get information for multiple trained models in a single API request
@@ -2641,8 +2645,6 @@ class MlClient(NamespacedClient):
         :param from_: Skips the specified number of models.
         :param include: A comma delimited string of optional fields to include in the
             response body.
-        :param include_model_definition: parameter is deprecated! Use [include=definition]
-            instead
         :param size: Specifies the maximum number of models to obtain.
         :param tags: A comma delimited string of tags. A trained model can have many
             tags, or none. When supplied, only trained models that contain all the supplied
@@ -2672,8 +2674,6 @@ class MlClient(NamespacedClient):
             __query["human"] = human
         if include is not None:
             __query["include"] = include
-        if include_model_definition is not None:
-            __query["include_model_definition"] = include_model_definition
         if pretty is not None:
             __query["pretty"] = pretty
         if size is not None:
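Because `include_model_definition` is removed in this hunk, callers that still need the model definition should request it through `include`. A short migration sketch (the model ID is a placeholder):

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# 8.x allowed: get_trained_models(model_id="my-model", include_model_definition=True)
# In 9.x, request the definition through `include` instead.
client.ml.get_trained_models(model_id="my-model", include="definition")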
@@ -2713,7 +2713,7 @@ class MlClient(NamespacedClient):
           models in a single API request by using a comma-separated list of model IDs or a wildcard expression.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-trained-models-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats>`_
 
         :param model_id: The unique identifier of the trained model or a model alias.
             It can be a comma-separated list or a wildcard expression.
@@ -2779,7 +2779,7 @@ class MlClient(NamespacedClient):
           <p>Evaluate a trained model.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/infer-trained-model.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model>`_
 
         :param model_id: The unique identifier of the trained model.
         :param docs: An array of objects to pass to the model for inference. The objects
@@ -2846,7 +2846,7 @@ class MlClient(NamespacedClient):
           cluster configuration.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-ml-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ml/info"
@@ -2886,8 +2886,8 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Open anomaly detection jobs.
-          An anomaly detection job must be opened to be ready to receive and analyze
+          <p>Open anomaly detection jobs.</p>
+          <p>An anomaly detection job must be opened to be ready to receive and analyze
           data. It can be opened and closed multiple times throughout its lifecycle.
           When you open a new job, it starts with an empty model.
           When you open an existing job, the most recent model state is automatically
@@ -2895,7 +2895,7 @@ class MlClient(NamespacedClient):
           new data is received.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-open-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param timeout: Refer to the description for the `timeout` query parameter.
@@ -2952,7 +2952,7 @@ class MlClient(NamespacedClient):
           <p>Add scheduled events to the calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-post-calendar-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param events: A list of one or more scheduled events. The event’s start and
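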
@@ -3013,7 +3013,7 @@ class MlClient(NamespacedClient):
           It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-post-data.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data>`_
 
         :param job_id: Identifier for the anomaly detection job. The job must have a
             state of open to receive and process the data.
@@ -3077,10 +3077,10 @@ class MlClient(NamespacedClient):
         .. raw:: html
 
           <p>Preview features used by data frame analytics.
-          Previews the extracted features used by a data frame analytics config.</p>
+          Preview the extracted features used by a data frame analytics config.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/preview-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job.
         :param config: A data frame analytics config as described in create data frame
@@ -3153,7 +3153,7 @@ class MlClient(NamespacedClient):
           You can also use secondary authorization headers to supply the credentials.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-preview-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -3232,7 +3232,7 @@ class MlClient(NamespacedClient):
           <p>Create a calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-calendar.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param description: A description of the calendar.
@@ -3289,7 +3289,7 @@ class MlClient(NamespacedClient):
           <p>Add anomaly detection job to calendar.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-calendar-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job>`_
 
         :param calendar_id: A string that uniquely identifies a calendar.
         :param job_id: An identifier for the anomaly detection jobs. It can be a job
@@ -3372,7 +3372,7 @@ class MlClient(NamespacedClient):
           <p>If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -3557,7 +3557,7 @@ class MlClient(NamespacedClient):
           directly to the <code>.ml-config</code> index. Do not give users <code>write</code> privileges on the <code>.ml-config</code> index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -3599,11 +3599,11 @@ class MlClient(NamespacedClient):
         :param ignore_unavailable: If true, unavailable indices (missing or closed) are
             ignored.
         :param indexes: An array of index names. Wildcards are supported. If any of the
-            indices are in remote clusters, the machine learning nodes must have the
-            `remote_cluster_client` role.
+            indices are in remote clusters, the master nodes and the machine learning
+            nodes must have the `remote_cluster_client` role.
         :param indices: An array of index names. Wildcards are supported. If any of the
-            indices are in remote clusters, the machine learning nodes must have the
-            `remote_cluster_client` role.
+            indices are in remote clusters, the master nodes and the machine learning
+            nodes must have the `remote_cluster_client` role.
         :param indices_options: Specifies index expansion options that are used during
             search
         :param job_id: Identifier for the anomaly detection job.
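The updated `indexes`/`indices` note means that for remote-cluster datafeeds the master nodes, not just the ML nodes, need the `remote_cluster_client` role; the datafeed definition itself is unchanged. An illustrative call, with placeholder IDs and index patterns:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# When any index lives on a remote cluster, both the master nodes and the ML
# nodes need the `remote_cluster_client` role.
client.ml.put_datafeed(
    datafeed_id="datafeed-remote-logs",
    job_id="remote-logs-job",
    indices=["remote_cluster:logs-*"],
    query={"match_all": {}},
)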
@@ -3719,7 +3719,7 @@ class MlClient(NamespacedClient):
           Specifically, filters are referenced in the <code>custom_rules</code> property of detector configuration objects.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         :param description: A description of the filter.
@@ -3816,12 +3816,12 @@ class MlClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Create an anomaly detection job.
-          If you include a <code>datafeed_config</code>, you must have read index privileges on the source index.
+          <p>Create an anomaly detection job.</p>
+          <p>If you include a <code>datafeed_config</code>, you must have read index privileges on the source index.
           If you include a <code>datafeed_config</code> but do not provide a query, the datafeed uses <code>{&quot;match_all&quot;: {&quot;boost&quot;: 1}}</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-put-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job>`_
 
         :param job_id: The identifier for the anomaly detection job. This identifier
             can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and
@@ -3871,13 +3871,7 @@ class MlClient(NamespacedClient):
         :param description: A description of the job.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
-            expressions match hidden data streams. Supports comma-separated values. Valid
-            values are: * `all`: Match any data stream or index, including hidden ones.
-            * `closed`: Match closed, non-hidden indices. Also matches any non-hidden
-            data stream. Data streams cannot be closed. * `hidden`: Match hidden data
-            streams and hidden indices. Must be combined with `open`, `closed`, or both.
-            * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden
-            indices. Also matches any non-hidden data stream.
+            expressions match hidden data streams. Supports comma-separated values.
         :param groups: A list of job groups. A job can belong to no groups or many.
         :param ignore_throttled: If `true`, concrete, expanded or aliased indices are
             ignored when frozen.
@@ -4029,7 +4023,7 @@ class MlClient(NamespacedClient):
           Enable you to supply a trained model that is not created by data frame analytics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-models.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model>`_
 
         :param model_id: The unique identifier of the trained model.
         :param compressed_definition: The compressed (GZipped and Base64 encoded) inference
@@ -4150,7 +4144,7 @@ class MlClient(NamespacedClient):
           returns a warning.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-models-aliases.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias>`_
 
         :param model_id: The identifier for the trained model that the alias refers to.
         :param model_alias: The alias to create or update. This value cannot end in numbers.
@@ -4211,7 +4205,7 @@ class MlClient(NamespacedClient):
           <p>Create part of a trained model definition.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-model-definition-part.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part>`_
 
         :param model_id: The unique identifier of the trained model.
         :param part: The definition part number. When the definition is loaded for inference
@@ -4293,7 +4287,7 @@ class MlClient(NamespacedClient):
           The vocabulary is stored in the index as described in <code>inference_config.*.vocabulary</code> of the trained model definition.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-trained-model-vocabulary.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary>`_
 
         :param model_id: The unique identifier of the trained model.
         :param vocabulary: The model vocabulary, which must not be empty.
@@ -4356,7 +4350,7 @@ class MlClient(NamespacedClient):
           comma separated list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-reset-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job>`_
 
         :param job_id: The ID of the job to reset.
         :param delete_user_annotations: Specifies whether annotations that have been
@@ -4420,7 +4414,7 @@ class MlClient(NamespacedClient):
           snapshot after Black Friday or a critical system failure.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-revert-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: You can specify `empty` as the <snapshot_id>. Reverting to
@@ -4495,7 +4489,7 @@ class MlClient(NamespacedClient):
           machine learning info API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-set-upgrade-mode.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode>`_
 
         :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts
             all job and datafeed tasks and prohibits new job and datafeed tasks from
@@ -4555,7 +4549,7 @@ class MlClient(NamespacedClient):
           the destination index in advance with custom settings and mappings.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -4618,7 +4612,7 @@ class MlClient(NamespacedClient):
           authorization headers when you created or updated the datafeed, those credentials are used instead.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-start-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -4664,11 +4658,14 @@ class MlClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
-    @_rewrite_parameters()
+    @_rewrite_parameters(
+        body_fields=("adaptive_allocations",),
+    )
     def start_trained_model_deployment(
         self,
         *,
         model_id: str,
+        adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None,
         cache_size: t.Optional[t.Union[int, str]] = None,
         deployment_id: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
@@ -4683,6 +4680,7 @@ class MlClient(NamespacedClient):
         wait_for: t.Optional[
             t.Union[str, t.Literal["fully_allocated", "started", "starting"]]
         ] = None,
+        body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -4691,10 +4689,13 @@ class MlClient(NamespacedClient):
           It allocates the model to every machine learning node.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-trained-model-deployment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment>`_
 
         :param model_id: The unique identifier of the trained model. Currently, only
             PyTorch models are supported.
+        :param adaptive_allocations: Adaptive allocations configuration. When enabled,
+            the number of allocations is set based on the current load. If adaptive_allocations
+            is enabled, do not set the number of allocations manually.
         :param cache_size: The inference cache size (in memory outside the JVM heap)
             per node for the model. The default value is the same size as the `model_size_bytes`.
             To disable the cache, `0b` can be provided.
@@ -4704,7 +4705,8 @@ class MlClient(NamespacedClient):
             model in memory but use a separate set of threads to evaluate the model.
             Increasing this value generally increases the throughput. If this setting
             is greater than the number of hardware threads it will automatically be changed
-            to a value less than the number of hardware threads.
+            to a value less than the number of hardware threads. If adaptive_allocations
+            is enabled, do not set this value, because it’s automatically set.
         :param priority: The deployment priority.
         :param queue_capacity: Specifies the number of inference requests that are allowed
             in the queue. After the number of requests exceeds this value, new requests
@@ -4724,6 +4726,7 @@ class MlClient(NamespacedClient):
         __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)}
         __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_start'
         __query: t.Dict[str, t.Any] = {}
+        __body: t.Dict[str, t.Any] = body if body is not None else {}
         if cache_size is not None:
             __query["cache_size"] = cache_size
         if deployment_id is not None:
@@ -4748,12 +4751,20 @@ class MlClient(NamespacedClient):
             __query["timeout"] = timeout
         if wait_for is not None:
             __query["wait_for"] = wait_for
+        if not __body:
+            if adaptive_allocations is not None:
+                __body["adaptive_allocations"] = adaptive_allocations
+        if not __body:
+            __body = None  # type: ignore[assignment]
         __headers = {"accept": "application/json"}
+        if __body is not None:
+            __headers["content-type"] = "application/json"
         return self.perform_request(  # type: ignore[return-value]
             "POST",
             __path,
             params=__query,
             headers=__headers,
+            body=__body,
             endpoint_id="ml.start_trained_model_deployment",
             path_parts=__path_parts,
         )
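These hunks make `adaptive_allocations` a real body field on start_trained_model_deployment. A hedged sketch follows; the model ID is a placeholder and the mapping keys (`enabled`, `min_number_of_allocations`, `max_number_of_allocations`) follow the Elasticsearch adaptive-allocations settings rather than anything shown in this diff.

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# With adaptive allocations enabled, do not set number_of_allocations manually;
# Elasticsearch scales allocations between the configured bounds based on load.
client.ml.start_trained_model_deployment(
    model_id="my-pytorch-model",
    adaptive_allocations={
        "enabled": True,
        "min_number_of_allocations": 1,
        "max_number_of_allocations": 4,
    },
    wait_for="started",
)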
@@ -4779,7 +4790,7 @@ class MlClient(NamespacedClient):
           throughout its lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/stop-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -4849,7 +4860,7 @@ class MlClient(NamespacedClient):
           multiple times throughout its lifecycle.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-stop-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed>`_
 
         :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds
             in a single API request by using a comma-separated list of datafeeds or a
@@ -4914,7 +4925,7 @@ class MlClient(NamespacedClient):
           <p>Stop a trained model deployment.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/stop-trained-model-deployment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment>`_
 
         :param model_id: The unique identifier of the trained model.
         :param allow_no_match: Specifies what to do when the request: contains wildcard
@@ -4982,7 +4993,7 @@ class MlClient(NamespacedClient):
           <p>Update a data frame analytics job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-dfanalytics.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics>`_
 
         :param id: Identifier for the data frame analytics job. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -5097,7 +5108,7 @@ class MlClient(NamespacedClient):
           those credentials are used instead.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-datafeed.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed>`_
 
         :param datafeed_id: A numerical character string that uniquely identifies the
             datafeed. This identifier can contain lowercase alphanumeric characters (a-z
@@ -5123,13 +5134,7 @@ class MlClient(NamespacedClient):
             check runs only on real-time datafeeds.
         :param expand_wildcards: Type of index that wildcard patterns can match. If the
             request can target data streams, this argument determines whether wildcard
-            expressions match hidden data streams. Supports comma-separated values. Valid
-            values are: * `all`: Match any data stream or index, including hidden ones.
-            * `closed`: Match closed, non-hidden indices. Also matches any non-hidden
-            data stream. Data streams cannot be closed. * `hidden`: Match hidden data
-            streams and hidden indices. Must be combined with `open`, `closed`, or both.
-            * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden
-            indices. Also matches any non-hidden data stream.
+            expressions match hidden data streams. Supports comma-separated values.
         :param frequency: The interval at which scheduled queries are made while the
             datafeed runs in real time. The default value is either the bucket span for
             short bucket spans, or, for longer bucket spans, a sensible fraction of the
@@ -5264,7 +5269,7 @@ class MlClient(NamespacedClient):
           Updates the description of a filter, adds items, or removes items from the list.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-filter.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter>`_
 
         :param filter_id: A string that uniquely identifies a filter.
         :param add_items: The items to add to the filter.
@@ -5358,7 +5363,7 @@ class MlClient(NamespacedClient):
           Updates certain properties of an anomaly detection job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job>`_
 
         :param job_id: Identifier for the job.
         :param allow_lazy_open: Advanced configuration option. Specifies whether this
@@ -5490,7 +5495,7 @@ class MlClient(NamespacedClient):
           Updates certain properties of a snapshot.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-update-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: Identifier for the model snapshot.
@@ -5535,12 +5540,13 @@ class MlClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("number_of_allocations",),
+        body_fields=("adaptive_allocations", "number_of_allocations"),
     )
     def update_trained_model_deployment(
         self,
         *,
         model_id: str,
+        adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -5554,16 +5560,20 @@ class MlClient(NamespacedClient):
           <p>Update a trained model deployment.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-trained-model-deployment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment>`_
 
         :param model_id: The unique identifier of the trained model. Currently, only
             PyTorch models are supported.
+        :param adaptive_allocations: Adaptive allocations configuration. When enabled,
+            the number of allocations is set based on the current load. If adaptive_allocations
+            is enabled, do not set the number of allocations manually.
         :param number_of_allocations: The number of model allocations on each node where
             the model is deployed. All allocations on a node share the same copy of the
             model in memory but use a separate set of threads to evaluate the model.
             Increasing this value generally increases the throughput. If this setting
             is greater than the number of hardware threads it will automatically be changed
-            to a value less than the number of hardware threads.
+            to a value less than the number of hardware threads. If adaptive_allocations
+            is enabled, do not set this value, because it’s automatically set.
         """
         if model_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'model_id'")
@@ -5580,6 +5590,8 @@ class MlClient(NamespacedClient):
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
+            if adaptive_allocations is not None:
+                __body["adaptive_allocations"] = adaptive_allocations
             if number_of_allocations is not None:
                 __body["number_of_allocations"] = number_of_allocations
         if not __body:
@@ -5614,7 +5626,7 @@ class MlClient(NamespacedClient):
         .. raw:: html
 
           <p>Upgrade a snapshot.
-          Upgrades an anomaly detection model snapshot to the latest major version.
+          Upgrade an anomaly detection model snapshot to the latest major version.
           Over time, older snapshot formats are deprecated and removed. Anomaly
           detection jobs support only snapshots that are from the current or previous
           major version.
@@ -5625,7 +5637,7 @@ class MlClient(NamespacedClient):
           job.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/ml-upgrade-job-model-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot>`_
 
         :param job_id: Identifier for the anomaly detection job.
         :param snapshot_id: A numerical character string that uniquely identifies the
@@ -5704,7 +5716,7 @@ class MlClient(NamespacedClient):
           <p>Validate an anomaly detection job.</p>
 
 
-        `<https://www.elastic.co/guide/en/machine-learning/8.17/ml-jobs.html>`_
+        `<https://www.elastic.co/guide/en/machine-learning/9.2/ml-jobs.html>`_
 
         :param analysis_config:
         :param analysis_limits:
@@ -5777,7 +5789,7 @@ class MlClient(NamespacedClient):
           <p>Validate an anomaly detection job.</p>
 
 
-        `<https://www.elastic.co/guide/en/machine-learning/8.17/ml-jobs.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch>`_
 
         :param detector:
         """
diff -pruN 8.17.2-2/elasticsearch/_sync/client/monitoring.py 9.2.0-1/elasticsearch/_sync/client/monitoring.py
--- 8.17.2-2/elasticsearch/_sync/client/monitoring.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/monitoring.py	2025-10-28 16:50:53.000000000 +0000
@@ -48,7 +48,7 @@ class MonitoringClient(NamespacedClient)
           This API is used by the monitoring features to send monitoring data.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/monitor-elasticsearch-cluster.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch>`_
 
         :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload
         :param operations:
diff -pruN 8.17.2-2/elasticsearch/_sync/client/nodes.py 9.2.0-1/elasticsearch/_sync/client/nodes.py
--- 8.17.2-2/elasticsearch/_sync/client/nodes.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/nodes.py	2025-10-28 16:50:53.000000000 +0000
@@ -50,7 +50,7 @@ class NodesClient(NamespacedClient):
           Clear the archived repositories metering information in the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-repositories-metering-archive-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
             information.
@@ -105,10 +105,10 @@ class NodesClient(NamespacedClient):
           Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-repositories-metering-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
-            information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).
+            information.
         """
         if node_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'node_id'")
@@ -143,7 +143,6 @@ class NodesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         ignore_idle_threads: t.Optional[bool] = None,
         interval: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         snapshots: t.Optional[int] = None,
         sort: t.Optional[
@@ -163,15 +162,12 @@ class NodesClient(NamespacedClient):
           The output is plain text with a breakdown of the top hot threads for each node.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-hot-threads.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads>`_
 
         :param node_id: List of node IDs or names used to limit returned information.
         :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket
             select, or to get a task from an empty queue) are filtered out.
         :param interval: The interval to do the second sampling of threads.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
         :param snapshots: Number of samples of thread stacktrace.
         :param sort: The sort order for 'cpu' type (default: total)
         :param threads: Specifies the number of hot threads to provide information for.
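
# Example (editor's sketch): `master_timeout` is removed from nodes.hot_threads in
# 9.x; the remaining sampling knobs are still passed as query parameters. The node
# selector and values below are illustrative; `client` is an Elasticsearch instance
# as in the earlier sketch.
client.nodes.hot_threads(
    node_id="_local",
    threads=3,
    interval="500ms",
    ignore_idle_threads=True,
)
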
@@ -197,8 +193,6 @@ class NodesClient(NamespacedClient):
             __query["ignore_idle_threads"] = ignore_idle_threads
         if interval is not None:
             __query["interval"] = interval
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if snapshots is not None:
@@ -231,27 +225,23 @@ class NodesClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         flat_settings: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Get node information.
-          By default, the API returns all attributes and core settings for cluster nodes.</p>
+          <p>Get node information.</p>
+          <p>By default, the API returns all attributes and core settings for cluster nodes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-info.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
             information.
         :param metric: Limits the information returned to the specific metrics. Supports
             a comma-separated list, such as http,ingest.
         :param flat_settings: If true, returns settings in flat format.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         """
@@ -277,8 +267,6 @@ class NodesClient(NamespacedClient):
             __query["flat_settings"] = flat_settings
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if timeout is not None:
@@ -320,7 +308,7 @@ class NodesClient(NamespacedClient):
           Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/secure-settings.html#reloadable-secure-settings>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings>`_
 
         :param node_id: The names of particular nodes in the cluster to target.
         :param secure_settings_password: The password for the Elasticsearch keystore.
@@ -380,10 +368,7 @@ class NodesClient(NamespacedClient):
         human: t.Optional[bool] = None,
         include_segment_file_sizes: t.Optional[bool] = None,
         include_unloaded_segments: t.Optional[bool] = None,
-        level: t.Optional[
-            t.Union[str, t.Literal["cluster", "indices", "shards"]]
-        ] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        level: t.Optional[t.Union[str, t.Literal["indices", "node", "shards"]]] = None,
         pretty: t.Optional[bool] = None,
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         types: t.Optional[t.Sequence[str]] = None,
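
# Example (editor's sketch): the `level` literal for nodes.stats is now
# "indices" | "node" | "shards" (it no longer accepts "cluster"), and
# `master_timeout` has been dropped. Values are illustrative; `client` is an
# Elasticsearch instance as in the earlier sketch.
client.nodes.stats(metric="indices", level="node")
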
@@ -396,7 +381,7 @@ class NodesClient(NamespacedClient):
           By default, all stats are returned. You can limit the returned information by using metrics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats>`_
 
         :param node_id: Comma-separated list of node IDs or names used to limit returned
             information.
@@ -417,11 +402,8 @@ class NodesClient(NamespacedClient):
             are requested).
         :param include_unloaded_segments: If `true`, the response includes information
             from segments that are not loaded into memory.
-        :param level: Indicates whether statistics are aggregated at the cluster, index,
-            or shard level.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
+        :param level: Indicates whether statistics are aggregated at the node, indices,
+            or shards level.
         :param timeout: Period to wait for a response. If no response is received before
             the timeout expires, the request fails and returns an error.
         :param types: A comma-separated list of document types for the indexing index
@@ -480,8 +462,6 @@ class NodesClient(NamespacedClient):
             __query["include_unloaded_segments"] = include_unloaded_segments
         if level is not None:
             __query["level"] = level
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         if timeout is not None:
@@ -516,7 +496,7 @@ class NodesClient(NamespacedClient):
           <p>Get feature usage information.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/cluster-nodes-usage.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage>`_
 
         :param node_id: A comma-separated list of node IDs or names to limit the returned
             information; use `_local` to return information from the node you're connecting
diff -pruN 8.17.2-2/elasticsearch/_sync/client/project.py 9.2.0-1/elasticsearch/_sync/client/project.py
--- 8.17.2-2/elasticsearch/_sync/client/project.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/project.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,67 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import typing as t
+
+from elastic_transport import ObjectApiResponse
+
+from ._base import NamespacedClient
+from .utils import (
+    Stability,
+    _rewrite_parameters,
+    _stability_warning,
+)
+
+
+class ProjectClient(NamespacedClient):
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def tags(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Return tags defined for the project</p>
+
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_project/tags"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="project.tags",
+            path_parts=__path_parts,
+        )
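
# Example (editor's sketch): the new, experimental `project` namespace exposes a
# single helper that issues GET /_project/tags. Calling it triggers a stability
# warning via the @_stability_warning(Stability.EXPERIMENTAL) decorator. `client`
# is an Elasticsearch instance as in the earlier sketch.
resp = client.project.tags()
print(resp.body)
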
diff -pruN 8.17.2-2/elasticsearch/_sync/client/query_rules.py 9.2.0-1/elasticsearch/_sync/client/query_rules.py
--- 8.17.2-2/elasticsearch/_sync/client/query_rules.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/query_rules.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class QueryRulesClient(NamespacedClient)
           This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule>`_
 
         :param ruleset_id: The unique identifier of the query ruleset containing the
             rule to delete
@@ -97,7 +97,7 @@ class QueryRulesClient(NamespacedClient)
           This is a destructive action that is not recoverable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset>`_
 
         :param ruleset_id: The unique identifier of the query ruleset to delete
         """
@@ -142,7 +142,7 @@ class QueryRulesClient(NamespacedClient)
           Get details about a query rule within a query ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule>`_
 
         :param ruleset_id: The unique identifier of the query ruleset containing the
             rule to retrieve
@@ -194,7 +194,7 @@ class QueryRulesClient(NamespacedClient)
           Get details about a query ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset>`_
 
         :param ruleset_id: The unique identifier of the query ruleset
         """
@@ -241,7 +241,7 @@ class QueryRulesClient(NamespacedClient)
           Get summarized information about the query rulesets.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-query-rulesets.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets>`_
 
         :param from_: The offset from the first result to fetch.
         :param size: The maximum number of results to retrieve.
@@ -302,7 +302,7 @@ class QueryRulesClient(NamespacedClient)
           If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule>`_
 
         :param ruleset_id: The unique identifier of the query ruleset containing the
             rule to be created or updated.
@@ -389,7 +389,7 @@ class QueryRulesClient(NamespacedClient)
           If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset>`_
 
         :param ruleset_id: The unique identifier of the query ruleset to be created or
             updated.
@@ -446,7 +446,7 @@ class QueryRulesClient(NamespacedClient)
           Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-query-ruleset.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test>`_
 
         :param ruleset_id: The unique identifier of the query ruleset to be created or
             updated
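
# Example (editor's sketch): only the documentation URL changes for the query rules
# test endpoint; the call shape is unchanged. It evaluates match criteria against a
# ruleset and reports which rules would fire. The ruleset ID and criteria key below
# are hypothetical and depend on how the ruleset's criteria are defined; `client`
# is an Elasticsearch instance as in the earlier sketch.
client.query_rules.test(
    ruleset_id="my-ruleset",
    match_criteria={"user_query": "pugs"},
)
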
diff -pruN 8.17.2-2/elasticsearch/_sync/client/rollup.py 9.2.0-1/elasticsearch/_sync/client/rollup.py
--- 8.17.2-2/elasticsearch/_sync/client/rollup.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/rollup.py	2025-10-28 16:50:53.000000000 +0000
@@ -67,7 +67,7 @@ class RollupClient(NamespacedClient):
           </code></pre>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-delete-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job>`_
 
         :param id: Identifier for the job.
         """
@@ -115,7 +115,7 @@ class RollupClient(NamespacedClient):
           For details about a historical rollup job, the rollup capabilities API may be more useful.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs>`_
 
         :param id: Identifier for the rollup job. If it is `_all` or omitted, the API
             returns all rollup jobs.
@@ -171,7 +171,7 @@ class RollupClient(NamespacedClient):
           </ol>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-caps.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps>`_
 
         :param id: Index, indices or index-pattern to return rollup capabilities for.
             `_all` may be used to fetch rollup capabilities from all jobs.
@@ -225,7 +225,7 @@ class RollupClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-get-rollup-index-caps.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps>`_
 
         :param index: Data stream or index to check for rollup capabilities. Wildcard
             (`*`) expressions are supported.
@@ -295,7 +295,7 @@ class RollupClient(NamespacedClient):
           <p>Jobs are created in a <code>STOPPED</code> state. You can start them with the start rollup jobs API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-put-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job>`_
 
         :param id: Identifier for the rollup job. This can be any alphanumeric string
             and uniquely identifies the data that is associated with the rollup job.
@@ -419,31 +419,10 @@ class RollupClient(NamespacedClient):
           The following functionality is not available:</p>
           <p><code>size</code>: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
           <code>highlighter</code>, <code>suggestors</code>, <code>post_filter</code>, <code>profile</code>, <code>explain</code>: These are similarly disallowed.</p>
-          <p><strong>Searching both historical rollup and non-rollup data</strong></p>
-          <p>The rollup search API has the capability to search across both &quot;live&quot; non-rollup data and the aggregated rollup data.
-          This is done by simply adding the live indices to the URI. For example:</p>
-          <pre><code>GET sensor-1,sensor_rollup/_rollup_search
-          {
-            &quot;size&quot;: 0,
-            &quot;aggregations&quot;: {
-               &quot;max_temperature&quot;: {
-                &quot;max&quot;: {
-                  &quot;field&quot;: &quot;temperature&quot;
-                }
-              }
-            }
-          }
-          </code></pre>
-          <p>The rollup search endpoint does two things when the search runs:</p>
-          <ul>
-          <li>The original request is sent to the non-rollup index unaltered.</li>
-          <li>A rewritten version of the original request is sent to the rollup index.</li>
-          </ul>
-          <p>When the two responses are received, the endpoint rewrites the rollup response and merges the two together.
-          During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.</p>
+          <p>For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search>`_
 
         :param index: A comma-separated list of data streams and indices used to limit
             the request. This parameter has the following rules: * At least one data
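
# Example (editor's sketch): the worked example removed from the docstring above
# (searching a live index and its rollup index together) translates to the Python
# client roughly as follows. Index and field names mirror the removed example;
# `client` is an Elasticsearch instance as in the earlier sketch.
client.rollup.rollup_search(
    index="sensor-1,sensor_rollup",
    size=0,
    aggregations={
        "max_temperature": {"max": {"field": "temperature"}}
    },
)
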
@@ -521,7 +500,7 @@ class RollupClient(NamespacedClient):
           If you try to start a job that is already started, nothing happens.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-start-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job>`_
 
         :param id: Identifier for the rollup job.
         """
@@ -575,7 +554,7 @@ class RollupClient(NamespacedClient):
           If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/rollup-stop-job.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job>`_
 
         :param id: Identifier for the rollup job.
         :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum)
diff -pruN 8.17.2-2/elasticsearch/_sync/client/search_application.py 9.2.0-1/elasticsearch/_sync/client/search_application.py
--- 8.17.2-2/elasticsearch/_sync/client/search_application.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/search_application.py	2025-10-28 16:50:53.000000000 +0000
@@ -45,13 +45,13 @@ class SearchApplicationClient(Namespaced
         """
         .. raw:: html
 
-          <p>Delete a search application.
-          Remove a search application and its associated alias. Indices attached to the search application are not removed.</p>
+          <p>Delete a search application.</p>
+          <p>Remove a search application and its associated alias. Indices attached to the search application are not removed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-search-application.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete>`_
 
-        :param name: The name of the search application to delete
+        :param name: The name of the search application to delete.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -94,7 +94,7 @@ class SearchApplicationClient(Namespaced
           The associated data stream is also deleted.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-analytics-collection.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics>`_
 
         :param name: The name of the analytics collection to be deleted
         """
@@ -138,7 +138,7 @@ class SearchApplicationClient(Namespaced
           <p>Get search application details.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-search-application.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get>`_
 
         :param name: The name of the search application
         """
@@ -182,7 +182,7 @@ class SearchApplicationClient(Namespaced
           <p>Get behavioral analytics collections.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-analytics-collection.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics>`_
 
         :param name: A list of analytics collections to limit the returned information
         """
@@ -234,7 +234,7 @@ class SearchApplicationClient(Namespaced
           Get information about search applications.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/list-search-applications.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics>`_
 
         :param from_: Starting offset.
         :param q: Query in the Lucene query string syntax.
@@ -290,7 +290,7 @@ class SearchApplicationClient(Namespaced
           <p>Create a behavioral analytics collection event.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/post-analytics-collection-event.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event>`_
 
         :param collection_name: The name of the behavioral analytics collection.
         :param event_type: The analytics event type.
@@ -357,7 +357,7 @@ class SearchApplicationClient(Namespaced
           <p>Create or update a search application.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-search-application.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put>`_
 
         :param name: The name of the search application to be created or updated.
         :param search_application:
@@ -414,7 +414,7 @@ class SearchApplicationClient(Namespaced
           <p>Create a behavioral analytics collection.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-analytics-collection.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics>`_
 
         :param name: The name of the analytics collection to be created or updated.
         """
@@ -467,7 +467,7 @@ class SearchApplicationClient(Namespaced
           <p>You must have <code>read</code> privileges on the backing alias of the search application.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-application-render-query.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query>`_
 
         :param name: The name of the search application to render the query for.
         :param params:
@@ -531,7 +531,7 @@ class SearchApplicationClient(Namespaced
           Unspecified template parameters are assigned their default values if applicable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/search-application-search.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search>`_
 
         :param name: The name of the search application to be searched.
         :param params: Query parameters specific to this request, which will override
diff -pruN 8.17.2-2/elasticsearch/_sync/client/searchable_snapshots.py 9.2.0-1/elasticsearch/_sync/client/searchable_snapshots.py
--- 8.17.2-2/elasticsearch/_sync/client/searchable_snapshots.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/searchable_snapshots.py	2025-10-28 16:50:53.000000000 +0000
@@ -50,7 +50,7 @@ class SearchableSnapshotsClient(Namespac
           Get statistics about the shared cache for partially mounted indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-cache-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats>`_
 
         :param node_id: The names of the nodes in the cluster to target.
         :param master_timeout:
@@ -111,7 +111,7 @@ class SearchableSnapshotsClient(Namespac
           Clear indices and data streams from the shared cache for partially mounted indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-clear-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache>`_
 
         :param index: A comma-separated list of data streams, indices, and aliases to
             clear from the cache. It supports wildcards (`*`).
@@ -190,7 +190,7 @@ class SearchableSnapshotsClient(Namespac
           Manually mounting ILM-managed snapshots can interfere with ILM processes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-mount-snapshot.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount>`_
 
         :param repository: The name of the repository containing the snapshot of the
             index to mount.
@@ -278,7 +278,7 @@ class SearchableSnapshotsClient(Namespac
           <p>Get searchable snapshot statistics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/searchable-snapshots-api-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats>`_
 
         :param index: A comma-separated list of data streams and indices to retrieve
             statistics for.
diff -pruN 8.17.2-2/elasticsearch/_sync/client/security.py 9.2.0-1/elasticsearch/_sync/client/security.py
--- 8.17.2-2/elasticsearch/_sync/client/security.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/security.py	2025-10-28 16:50:53.000000000 +0000
@@ -58,7 +58,7 @@ class SecurityClient(NamespacedClient):
           Any updates do not change existing content for either the <code>labels</code> or <code>data</code> fields.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-activate-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile>`_
 
         :param grant_type: The type of grant.
         :param access_token: The user's Elasticsearch access token or JWT. Both `access`
@@ -124,7 +124,7 @@ class SecurityClient(NamespacedClient):
           If the user cannot be authenticated, this API returns a 401 status code.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-authenticate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/_authenticate"
@@ -171,7 +171,7 @@ class SecurityClient(NamespacedClient):
           The bulk delete roles API cannot delete roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-delete-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role>`_
 
         :param names: An array of role names to delete
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -232,7 +232,7 @@ class SecurityClient(NamespacedClient):
           The bulk create or update roles API cannot update roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-put-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role>`_
 
         :param roles: A dictionary of role name to RoleDescriptor objects to add or update
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -300,7 +300,7 @@ class SecurityClient(NamespacedClient):
           <p>A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-bulk-update-api-keys.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys>`_
 
         :param ids: The API key identifiers.
         :param expiration: Expiration time for the API keys. By default, API keys never
@@ -378,7 +378,7 @@ class SecurityClient(NamespacedClient):
           <p>Change the passwords of users in the native realm and built-in users.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-change-password.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password>`_
 
         :param username: The user whose password you want to change. If you do not specify
             this parameter, the password is changed for the current user.
@@ -445,7 +445,7 @@ class SecurityClient(NamespacedClient):
           The cache is also automatically cleared on state changes of the security index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-api-key-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache>`_
 
         :param ids: Comma-separated list of API key IDs to evict from the API key cache.
             To evict all API keys, use `*`. Does not support other wildcard patterns.
@@ -491,7 +491,7 @@ class SecurityClient(NamespacedClient):
           The cache is also automatically cleared for applications that have their privileges updated.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-privilege-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges>`_
 
         :param application: A comma-separated list of applications. To clear all applications,
             use an asterisk (`*`). It does not support other wildcard patterns.
@@ -541,7 +541,7 @@ class SecurityClient(NamespacedClient):
           For more information, refer to the documentation about controlling the user cache.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms>`_
 
         :param realms: A comma-separated list of realms. To clear all realms, use an
             asterisk (`*`). It does not support other wildcard patterns.
@@ -591,7 +591,7 @@ class SecurityClient(NamespacedClient):
           <p>Evict roles from the native role cache.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-role-cache.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles>`_
 
         :param name: A comma-separated list of roles to evict from the role cache. To
             evict all roles, use an asterisk (`*`). It does not support other wildcard
@@ -643,7 +643,7 @@ class SecurityClient(NamespacedClient):
           The cache for tokens backed by the <code>service_tokens</code> file is cleared automatically on file changes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-clear-service-token-caches.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens>`_
 
         :param namespace: The namespace, which is a top-level grouping of service accounts.
         :param service: The name of the service, which must be unique within its namespace.
@@ -715,7 +715,7 @@ class SecurityClient(NamespacedClient):
           To configure or turn off the API key service, refer to API key service setting documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-create-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key>`_
 
         :param expiration: The expiration time for the API key. By default, API keys
             never expire.
@@ -805,7 +805,7 @@ class SecurityClient(NamespacedClient):
           Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-create-cross-cluster-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key>`_
 
         :param access: The access to be granted to this API key. The access is composed
             of permissions for cross-cluster search and cross-cluster replication. At
@@ -880,7 +880,7 @@ class SecurityClient(NamespacedClient):
           You must actively delete them if they are no longer needed.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-create-service-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token>`_
 
         :param namespace: The name of the namespace, which is a top-level grouping of
             service accounts.
@@ -966,7 +966,7 @@ class SecurityClient(NamespacedClient):
           The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delegate-pki-authentication.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki>`_
 
         :param x509_certificate_chain: The X509Certificate chain, which is represented
             as an ordered string array. Each string in the array is a base64-encoded
@@ -1030,7 +1030,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-privilege.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges>`_
 
         :param application: The name of the application. Application privileges are always
             associated with exactly one application.
@@ -1093,7 +1093,7 @@ class SecurityClient(NamespacedClient):
           The delete roles API cannot remove roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role>`_
 
         :param name: The name of the role.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1147,7 +1147,7 @@ class SecurityClient(NamespacedClient):
           The delete role mappings API cannot remove role mappings that are defined in role mapping files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-role-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping>`_
 
         :param name: The distinct name that identifies the role mapping. The name is
             used solely as an identifier to facilitate interaction via the API; it does
@@ -1203,7 +1203,7 @@ class SecurityClient(NamespacedClient):
           <p>Delete service account tokens for a service in a specified namespace.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-service-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token>`_
 
         :param namespace: The namespace, which is a top-level grouping of service accounts.
         :param service: The service name.
@@ -1265,7 +1265,7 @@ class SecurityClient(NamespacedClient):
           <p>Delete users from the native realm.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-delete-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user>`_
 
         :param username: An identifier for the user.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1319,7 +1319,7 @@ class SecurityClient(NamespacedClient):
           You can use this API to revoke a user's access to Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-disable-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user>`_
 
         :param username: An identifier for the user.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1376,7 +1376,7 @@ class SecurityClient(NamespacedClient):
           To re-enable a disabled user profile, use the enable user profile API .</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-disable-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile>`_
 
         :param uid: Unique identifier for the user profile.
         :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
@@ -1429,7 +1429,7 @@ class SecurityClient(NamespacedClient):
           By default, when you create users, they are enabled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-enable-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user>`_
 
         :param username: An identifier for the user.
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -1486,7 +1486,7 @@ class SecurityClient(NamespacedClient):
           If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-enable-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile>`_
 
         :param uid: A unique identifier for the user profile.
         :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
@@ -1536,7 +1536,7 @@ class SecurityClient(NamespacedClient):
           Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-kibana-enrollment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/enroll/kibana"
@@ -1577,7 +1577,7 @@ class SecurityClient(NamespacedClient):
           The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-node-enrollment.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/enroll/node"
@@ -1626,7 +1626,7 @@ class SecurityClient(NamespacedClient):
           If you have <code>read_security</code>, <code>manage_api_key</code> or greater privileges (including <code>manage_security</code>), this API returns all API keys regardless of ownership.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key>`_
 
         :param active_only: A boolean flag that can be used to query API keys that are
             currently active. An API key is considered active if it is neither invalidated,
@@ -1704,7 +1704,7 @@ class SecurityClient(NamespacedClient):
           <p>Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-builtin-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/privilege/_builtin"
@@ -1749,7 +1749,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges>`_
 
         :param application: The name of the application. Application privileges are always
             associated with exactly one application. If you do not specify this parameter,
@@ -1805,7 +1805,7 @@ class SecurityClient(NamespacedClient):
           The get roles API cannot retrieve roles that are defined in roles files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role>`_
 
         :param name: The name of the role. You can specify multiple roles as a comma-separated
             list. If you do not specify this parameter, the API returns information about
@@ -1856,7 +1856,7 @@ class SecurityClient(NamespacedClient):
           The get role mappings API cannot retrieve role mappings that are defined in role mapping files.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-role-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping>`_
 
         :param name: The distinct name that identifies the role mapping. The name is
             used solely as an identifier to facilitate interaction via the API; it does
@@ -1909,7 +1909,7 @@ class SecurityClient(NamespacedClient):
           <p>NOTE: Currently, only the <code>elastic/fleet-server</code> service account is available.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-service-accounts.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts>`_
 
         :param namespace: The name of the namespace. Omit this parameter to retrieve
             information about all service accounts. If you omit this parameter, you must
@@ -1967,7 +1967,7 @@ class SecurityClient(NamespacedClient):
           Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-service-credentials.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials>`_
 
         :param namespace: The name of the namespace.
         :param service: The service name.
@@ -2023,7 +2023,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings>`_
 
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -2052,6 +2052,45 @@ class SecurityClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters()
+    def get_stats(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get security stats.</p>
+          <p>Gather security usage statistics from all node(s) within the cluster.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats>`_
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_security/stats"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="security.get_stats",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         body_fields=(
             "grant_type",
@@ -2099,7 +2138,7 @@ class SecurityClient(NamespacedClient):
           If you want to invalidate a token immediately, you can do so by using the invalidate token API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token>`_
 
         :param grant_type: The type of grant. Supported grant types are: `password`,
             `_kerberos`, `client_credentials`, and `refresh_token`.
@@ -2173,7 +2212,7 @@ class SecurityClient(NamespacedClient):
           <p>Get information about users in the native realm and built-in users.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user>`_
 
         :param username: An identifier for the user. You can specify multiple usernames
             as a comma-separated list. If you omit this parameter, the API retrieves
@@ -2213,13 +2252,10 @@ class SecurityClient(NamespacedClient):
     def get_user_privileges(
         self,
         *,
-        application: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
-        priviledge: t.Optional[str] = None,
-        username: t.Optional[t.Union[None, str]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -2231,20 +2267,11 @@ class SecurityClient(NamespacedClient):
           To check whether a user has a specific list of privileges, use the has privileges API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-user-privileges.html>`_
-
-        :param application: The name of the application. Application privileges are always
-            associated with exactly one application. If you do not specify this parameter,
-            the API returns information about all privileges for all applications.
-        :param priviledge: The name of the privilege. If you do not specify this parameter,
-            the API returns information about all privileges for the requested application.
-        :param username:
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/user/_privileges"
         __query: t.Dict[str, t.Any] = {}
-        if application is not None:
-            __query["application"] = application
         if error_trace is not None:
             __query["error_trace"] = error_trace
         if filter_path is not None:
@@ -2253,10 +2280,6 @@ class SecurityClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
-        if priviledge is not None:
-            __query["priviledge"] = priviledge
-        if username is not None:
-            __query["username"] = username
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -2288,7 +2311,7 @@ class SecurityClient(NamespacedClient):
           Elastic reserves the right to change or remove this feature in future releases without prior notice.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-get-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile>`_
 
         :param uid: A unique identifier for the user profile.
         :param data: A comma-separated list of filters for the `data` field of the profile
@@ -2345,6 +2368,9 @@ class SecurityClient(NamespacedClient):
         human: t.Optional[bool] = None,
         password: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[
+            t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
+        ] = None,
         run_as: t.Optional[str] = None,
         username: t.Optional[str] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
@@ -2372,7 +2398,7 @@ class SecurityClient(NamespacedClient):
           <p>By default, API keys never expire. You can specify expiration information when you create the API keys.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-grant-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key>`_
 
         :param api_key: The API key.
         :param grant_type: The type of grant. Supported grant types are: `access_token`,
@@ -2382,6 +2408,9 @@ class SecurityClient(NamespacedClient):
             types.
         :param password: The user's password. If you specify the `password` grant type,
             this parameter is required. It is not valid with other grant types.
+        :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
+            this operation visible to search. If 'wait_for', it waits for a refresh to
+            make this operation visible to search. If 'false', nothing is done with refreshes.
         :param run_as: The name of the user to be impersonated.
         :param username: The user name that identifies the user. If you specify the `password`
             grant type, this parameter is required. It is not valid with other grant
@@ -2403,6 +2432,8 @@ class SecurityClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         if not __body:
             if api_key is not None:
                 __body["api_key"] = api_key
@@ -2455,6 +2486,7 @@ class SecurityClient(NamespacedClient):
                         "manage_data_frame_transforms",
                         "manage_data_stream_global_retention",
                         "manage_enrich",
+                        "manage_esql",
                         "manage_ilm",
                         "manage_index_templates",
                         "manage_inference",
@@ -2480,6 +2512,7 @@ class SecurityClient(NamespacedClient):
                         "monitor_data_frame_transforms",
                         "monitor_data_stream_global_retention",
                         "monitor_enrich",
+                        "monitor_esql",
                         "monitor_inference",
                         "monitor_ml",
                         "monitor_rollup",
@@ -2519,7 +2552,7 @@ class SecurityClient(NamespacedClient):
           To check the privileges of other users, you must use the run as feature.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-has-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges>`_
 
         :param user: Username
         :param application:
@@ -2584,7 +2617,7 @@ class SecurityClient(NamespacedClient):
           Elastic reserves the right to change or remove this feature in future releases without prior notice.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-has-privileges-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile>`_
 
         :param privileges: An object containing all the privileges to be checked.
         :param uids: A list of profile IDs. The privileges are checked for associated
@@ -2658,7 +2691,7 @@ class SecurityClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-invalidate-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key>`_
 
         :param id:
         :param ids: A list of API key ids. This parameter cannot be used with any of
@@ -2742,7 +2775,7 @@ class SecurityClient(NamespacedClient):
           If none of these two are specified, then <code>realm_name</code> and/or <code>username</code> need to be specified.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-invalidate-token.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token>`_
 
         :param realm_name: The name of an authentication realm. This parameter cannot
             be used with either `refresh_token` or `token`.
@@ -2810,7 +2843,7 @@ class SecurityClient(NamespacedClient):
           These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-authenticate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate>`_
 
         :param nonce: Associate a client session with an ID token and mitigate replay
             attacks. This value needs to be the same as the one that was provided to
@@ -2867,12 +2900,12 @@ class SecurityClient(NamespacedClient):
         )
 
     @_rewrite_parameters(
-        body_fields=("access_token", "refresh_token"),
+        body_fields=("token", "refresh_token"),
     )
     def oidc_logout(
         self,
         *,
-        access_token: t.Optional[str] = None,
+        token: t.Optional[str] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -2890,13 +2923,13 @@ class SecurityClient(NamespacedClient):
           These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-logout.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout>`_
 
-        :param access_token: The access token to be invalidated.
+        :param token: The access token to be invalidated.
         :param refresh_token: The refresh token to be invalidated.
         """
-        if access_token is None and body is None:
-            raise ValueError("Empty value passed for parameter 'access_token'")
+        if token is None and body is None:
+            raise ValueError("Empty value passed for parameter 'token'")
         __path_parts: t.Dict[str, str] = {}
         __path = "/_security/oidc/logout"
         __query: t.Dict[str, t.Any] = {}
@@ -2910,8 +2943,8 @@ class SecurityClient(NamespacedClient):
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
-            if access_token is not None:
-                __body["access_token"] = access_token
+            if token is not None:
+                __body["token"] = token
             if refresh_token is not None:
                 __body["refresh_token"] = refresh_token
         __headers = {"accept": "application/json", "content-type": "application/json"}
@@ -2952,7 +2985,7 @@ class SecurityClient(NamespacedClient):
           These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-oidc-prepare-authentication.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication>`_
 
         :param iss: In the case of a third party initiated single sign on, this is the
             issuer identifier for the OP that the RP is to send the authentication request
@@ -3048,7 +3081,7 @@ class SecurityClient(NamespacedClient):
           <p>Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: <code>/</code>, <code>*</code>, <code>:</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-privileges.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges>`_
 
         :param privileges:
         :param refresh: If `true` (the default) then refresh the affected shards to make
@@ -3126,6 +3159,7 @@ class SecurityClient(NamespacedClient):
                         "manage_data_frame_transforms",
                         "manage_data_stream_global_retention",
                         "manage_enrich",
+                        "manage_esql",
                         "manage_ilm",
                         "manage_index_templates",
                         "manage_inference",
@@ -3151,6 +3185,7 @@ class SecurityClient(NamespacedClient):
                         "monitor_data_frame_transforms",
                         "monitor_data_stream_global_retention",
                         "monitor_enrich",
+                        "monitor_esql",
                         "monitor_inference",
                         "monitor_ml",
                         "monitor_rollup",
@@ -3200,9 +3235,12 @@ class SecurityClient(NamespacedClient):
           File-based role management is not available in Elastic Serverless.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role>`_
 
-        :param name: The name of the role.
+        :param name: The name of the role that is being created or updated. On Elasticsearch
+            Serverless, the role name must begin with a letter or digit and can only
+            contain letters, digits and the characters '_', '-', and '.'. Each role must
+            have a unique name, as this will serve as the identifier for that role.
         :param applications: A list of application privilege entries.
         :param cluster: A list of cluster privileges. These privileges define the cluster-level
             actions for users with this role.
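The expanded cluster-privilege literals (including the new `manage_esql` and `monitor_esql`) can be passed straight to `put_role`. A minimal sketch with a hypothetical role name and index pattern:

# `client` is the assumed Elasticsearch instance from the earlier sketch.
client.security.put_role(
    name="esql-observer",                          # hypothetical role name
    cluster=["monitor", "monitor_esql"],           # monitor_esql is new in this release
    indices=[{"names": ["logs-*"], "privileges": ["read", "view_index_metadata"]}],
)
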
@@ -3332,7 +3370,7 @@ class SecurityClient(NamespacedClient):
           If the format of the template is set to &quot;json&quot; then the template is expected to produce a JSON string or an array of JSON strings for the role names.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-role-mapping.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping>`_
 
         :param name: The distinct name that identifies the role mapping. The name is
             used solely as an identifier to facilitate interaction via the API; it does
@@ -3434,7 +3472,7 @@ class SecurityClient(NamespacedClient):
           To change a user's password without updating any other fields, use the change password API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-put-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user>`_
 
         :param username: An identifier for the user. NOTE: Usernames must be at least
             1 and no more than 507 characters. They can contain alphanumeric characters
@@ -3528,7 +3566,7 @@ class SecurityClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -3550,10 +3588,11 @@ class SecurityClient(NamespacedClient):
           You can optionally filter the results with a query.</p>
           <p>To use this API, you must have at least the <code>manage_own_api_key</code> or the <code>read_security</code> cluster privileges.
           If you have only the <code>manage_own_api_key</code> privilege, this API returns only the API keys that you own.
-          If you have the <code>read_security</code>, <code>manage_api_key</code>, or greater privileges (including <code>manage_security</code>), this API returns all API keys regardless of ownership.</p>
+          If you have the <code>read_security</code>, <code>manage_api_key</code>, or greater privileges (including <code>manage_security</code>), this API returns all API keys regardless of ownership.
+          Refer to the linked documentation for examples of how to find API keys:</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-query-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys>`_
 
         :param aggregations: Any aggregations to run over the corpus of returned API
             keys. Aggregations and queries work together. Aggregations are computed only
@@ -3674,7 +3713,7 @@ class SecurityClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -3696,7 +3735,7 @@ class SecurityClient(NamespacedClient):
           Also, the results can be paginated and sorted.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-query-role.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role>`_
 
         :param from_: The starting document offset. It must not be negative. By default,
             you cannot page through more than 10,000 hits using the `from` and `size`
@@ -3767,7 +3806,7 @@ class SecurityClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -3789,7 +3828,7 @@ class SecurityClient(NamespacedClient):
           This API is only for native users.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-query-user.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user>`_
 
         :param from_: The starting document offset. It must not be negative. By default,
             you cannot page through more than 10,000 hits using the `from` and `size`
@@ -3882,7 +3921,7 @@ class SecurityClient(NamespacedClient):
           This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-authenticate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate>`_
 
         :param content: The SAML response as it was sent by the user's browser, usually
             a Base64 encoded XML document.
@@ -3955,7 +3994,7 @@ class SecurityClient(NamespacedClient):
           The caller of this API must prepare the request accordingly so that this API can handle either of them.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-complete-logout.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout>`_
 
         :param ids: A JSON array with all the valid SAML Request Ids that the caller
             of the API has for the current user.
@@ -4031,7 +4070,7 @@ class SecurityClient(NamespacedClient):
           Thus the user can be redirected back to their IdP.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-invalidate.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate>`_
 
         :param query_string: The query part of the URL that the user was redirected to
             by the SAML IdP to initiate the Single Logout. This query should include
@@ -4106,7 +4145,7 @@ class SecurityClient(NamespacedClient):
           If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-logout.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout>`_
 
         :param token: The access token that was returned as a response to calling the
             SAML authenticate API. Alternatively, the most recent token that was received
@@ -4176,7 +4215,7 @@ class SecurityClient(NamespacedClient):
           The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-prepare-authentication.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication>`_
 
         :param acs: The Assertion Consumer Service URL that matches the one of the SAML
             realms in Elasticsearch. The realm is used to generate the authentication
@@ -4237,7 +4276,7 @@ class SecurityClient(NamespacedClient):
           This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-saml-sp-metadata.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata>`_
 
         :param realm_name: The name of the SAML realm in Elasticsearch.
         """
@@ -4290,7 +4329,7 @@ class SecurityClient(NamespacedClient):
           Elastic reserves the right to change or remove this feature in future releases without prior notice.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-suggest-user-profile.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles>`_
 
         :param data: A comma-separated list of filters for the `data` field of the profile
             document. To return all content use `data=*`. To return a subset of content,
@@ -4377,7 +4416,7 @@ class SecurityClient(NamespacedClient):
           This change can occur if the owner user's permissions have changed since the API key was created or last modified.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key>`_
 
         :param id: The ID of the API key to update.
         :param expiration: The expiration time for the API key. By default, API keys
@@ -4463,9 +4502,10 @@ class SecurityClient(NamespacedClient):
           <p>This API supports updates to an API key's access scope, metadata, and expiration.
           The owner user's information, such as the <code>username</code> and <code>realm</code>, is also updated automatically on every call.</p>
           <p>NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.</p>
+          <p>To learn more about how to use this API, refer to the <a href="https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples">Update cross cluster API key API examples page</a>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-cross-cluster-api-key.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key>`_
 
         :param id: The ID of the cross-cluster API key to update.
         :param access: The access to be granted to this API key. The access is composed
@@ -4544,7 +4584,7 @@ class SecurityClient(NamespacedClient):
           This API does not yet support configuring the settings for indices before they are in use.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -4629,7 +4669,7 @@ class SecurityClient(NamespacedClient):
           The <code>update_profile_data</code> global privilege grants privileges for updating only the allowed namespaces.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-update-user-profile-data.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data>`_
 
         :param uid: A unique identifier for the user profile.
         :param data: Non-searchable data that you want to associate with the user profile.
diff -pruN 8.17.2-2/elasticsearch/_sync/client/shutdown.py 9.2.0-1/elasticsearch/_sync/client/shutdown.py
--- 8.17.2-2/elasticsearch/_sync/client/shutdown.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/shutdown.py	2025-10-28 16:50:53.000000000 +0000
@@ -33,13 +33,9 @@ class ShutdownClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -53,7 +49,7 @@ class ShutdownClient(NamespacedClient):
           <p>If the operator privileges feature is enabled, you must be an operator to use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-shutdown.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node>`_
 
         :param node_id: The node id of node to be removed from the shutdown state
         :param master_timeout: Period to wait for a connection to the master node. If
@@ -97,13 +93,8 @@ class ShutdownClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -115,14 +106,12 @@ class ShutdownClient(NamespacedClient):
           <p>If the operator privileges feature is enabled, you must be an operator to use this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-shutdown.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node>`_
 
         :param node_id: Which node for which to retrieve the shutdown status
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
             returns an error.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
         """
         __path_parts: t.Dict[str, str]
         if node_id not in SKIP_IN_PATH:
@@ -142,8 +131,6 @@ class ShutdownClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "GET",
@@ -169,14 +156,10 @@ class ShutdownClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
         target_node_name: t.Optional[str] = None,
-        timeout: t.Optional[
-            t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]]
-        ] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -194,7 +177,7 @@ class ShutdownClient(NamespacedClient):
           Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-shutdown.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node>`_
 
         :param node_id: The node identifier. This parameter is not validated against
             the cluster's active nodes. This enables you to register a node for shut
diff -pruN 8.17.2-2/elasticsearch/_sync/client/simulate.py 9.2.0-1/elasticsearch/_sync/client/simulate.py
--- 8.17.2-2/elasticsearch/_sync/client/simulate.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/simulate.py	2025-10-28 16:50:53.000000000 +0000
@@ -35,7 +35,7 @@ class SimulateClient(NamespacedClient):
         body_fields=(
             "docs",
             "component_template_substitutions",
-            "index_template_subtitutions",
+            "index_template_substitutions",
             "mapping_addition",
             "pipeline_substitutions",
         ),
@@ -52,10 +52,11 @@ class SimulateClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
-        index_template_subtitutions: t.Optional[
+        index_template_substitutions: t.Optional[
             t.Mapping[str, t.Mapping[str, t.Any]]
         ] = None,
         mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None,
+        merge_type: t.Optional[t.Union[str, t.Literal["index", "template"]]] = None,
         pipeline: t.Optional[str] = None,
         pipeline_substitutions: t.Optional[
             t.Mapping[str, t.Mapping[str, t.Any]]
@@ -81,7 +82,7 @@ class SimulateClient(NamespacedClient):
           These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/simulate-ingest-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest>`_
 
         :param docs: Sample documents to test in the pipeline.
         :param index: The index to simulate ingesting into. This value can be overridden
@@ -90,9 +91,14 @@ class SimulateClient(NamespacedClient):
             an index argument.
         :param component_template_substitutions: A map of component template names to
             substitute component template definition objects.
-        :param index_template_subtitutions: A map of index template names to substitute
+        :param index_template_substitutions: A map of index template names to substitute
             index template definition objects.
         :param mapping_addition:
+        :param merge_type: The mapping merge type if mapping overrides are being provided
+            in mapping_addition. The allowed values are one of index or template. The
+            index option merges mappings the way they would be merged into an existing
+            index. The template option merges mappings the way they would be merged into
+            a template.
         :param pipeline: The pipeline to use as the default pipeline. This value can
             be used to override the default pipeline of the index.
         :param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline`
@@ -116,6 +122,8 @@ class SimulateClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if merge_type is not None:
+            __query["merge_type"] = merge_type
         if pipeline is not None:
             __query["pipeline"] = pipeline
         if pretty is not None:
@@ -127,8 +135,8 @@ class SimulateClient(NamespacedClient):
                 __body["component_template_substitutions"] = (
                     component_template_substitutions
                 )
-            if index_template_subtitutions is not None:
-                __body["index_template_subtitutions"] = index_template_subtitutions
+            if index_template_substitutions is not None:
+                __body["index_template_substitutions"] = index_template_substitutions
             if mapping_addition is not None:
                 __body["mapping_addition"] = mapping_addition
             if pipeline_substitutions is not None:
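Alongside the corrected `index_template_substitutions` spelling, `simulate.ingest` gains a `merge_type` query parameter that controls how `mapping_addition` is merged. A sketch with hypothetical index and template names:

# `client` is the assumed Elasticsearch instance from the earlier sketch.
resp = client.simulate.ingest(
    index="my-index",                              # hypothetical target index
    docs=[{"_source": {"message": "hello world"}}],
    index_template_substitutions={                 # corrected spelling in 9.x
        "my-template": {"index_patterns": ["my-index*"], "template": {"settings": {}}}
    },
    mapping_addition={"properties": {"message": {"type": "keyword"}}},
    merge_type="template",                         # new query parameter
)
print(resp)
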
diff -pruN 8.17.2-2/elasticsearch/_sync/client/slm.py 9.2.0-1/elasticsearch/_sync/client/slm.py
--- 8.17.2-2/elasticsearch/_sync/client/slm.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/slm.py	2025-10-28 16:50:53.000000000 +0000
@@ -45,7 +45,7 @@ class SlmClient(NamespacedClient):
           This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-delete-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle>`_
 
         :param policy_id: The id of the snapshot lifecycle policy to remove
         :param master_timeout: The period to wait for a connection to the master node.
@@ -101,7 +101,7 @@ class SlmClient(NamespacedClient):
           The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-execute-lifecycle.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle>`_
 
         :param policy_id: The id of the snapshot lifecycle policy to be executed
         :param master_timeout: The period to wait for a connection to the master node.
@@ -156,7 +156,7 @@ class SlmClient(NamespacedClient):
           The retention policy is normally applied according to its schedule.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-execute-retention.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -208,7 +208,7 @@ class SlmClient(NamespacedClient):
           Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle>`_
 
         :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve
         :param master_timeout: The period to wait for a connection to the master node.
@@ -265,7 +265,7 @@ class SlmClient(NamespacedClient):
           Get global and policy-level statistics about actions taken by snapshot lifecycle management.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats>`_
 
         :param master_timeout: Period to wait for a connection to the master node. If
             no response is received before the timeout expires, the request fails and
@@ -315,7 +315,7 @@ class SlmClient(NamespacedClient):
           <p>Get the snapshot lifecycle management status.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-get-status.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -379,7 +379,7 @@ class SlmClient(NamespacedClient):
           Only the latest version of a policy is stored.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-put-policy.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle>`_
 
         :param policy_id: The identifier for the snapshot lifecycle policy you want to
             create or update.
@@ -431,11 +431,7 @@ class SlmClient(NamespacedClient):
                 __body["retention"] = retention
             if schedule is not None:
                 __body["schedule"] = schedule
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
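With the header handling simplified, `slm.put_lifecycle` now always sends a JSON body. A sketch registering a hypothetical nightly policy:

# `client` is the assumed Elasticsearch instance from the earlier sketch.
client.slm.put_lifecycle(
    policy_id="nightly-snapshots",                 # hypothetical policy id
    schedule="0 30 1 * * ?",                       # 01:30 every day (cron expression)
    name="<nightly-snap-{now/d}>",
    repository="my_repository",                    # hypothetical repository name
    config={"indices": ["logs-*"], "ignore_unavailable": True},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)
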
@@ -465,7 +461,7 @@ class SlmClient(NamespacedClient):
           Manually starting SLM is necessary only if it has been stopped using the stop SLM API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-start.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -523,7 +519,7 @@ class SlmClient(NamespacedClient):
           Use the get snapshot lifecycle management status API to see if SLM is running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/slm-api-stop.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
diff -pruN 8.17.2-2/elasticsearch/_sync/client/snapshot.py 9.2.0-1/elasticsearch/_sync/client/snapshot.py
--- 8.17.2-2/elasticsearch/_sync/client/snapshot.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/snapshot.py	2025-10-28 16:50:53.000000000 +0000
@@ -50,11 +50,18 @@ class SnapshotClient(NamespacedClient):
           Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository>`_
 
-        :param name: Snapshot repository to clean up.
-        :param master_timeout: Period to wait for a connection to the master node.
-        :param timeout: Period to wait for a response.
+        :param name: The name of the snapshot repository to clean up.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If the master node is not available before the timeout expires, the request
+            fails and returns an error. To indicate that the request should never timeout,
+            set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -98,7 +105,6 @@ class SnapshotClient(NamespacedClient):
         human: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
-        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -108,14 +114,17 @@ class SnapshotClient(NamespacedClient):
           Clone part or all of a snapshot into another snapshot in the same repository.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone>`_
 
-        :param repository: A repository name
-        :param snapshot: The name of the snapshot to clone from
-        :param target_snapshot: The name of the cloned snapshot to create
-        :param indices:
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout:
+        :param repository: The name of the snapshot repository that both source and target
+            snapshot belong to.
+        :param snapshot: The source snapshot name.
+        :param target_snapshot: The target snapshot name.
+        :param indices: A comma-separated list of indices to include in the snapshot.
+            Multi-target syntax is supported.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -143,8 +152,6 @@ class SnapshotClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
-        if timeout is not None:
-            __query["timeout"] = timeout
         if not __body:
             if indices is not None:
                 __body["indices"] = indices
@@ -161,6 +168,7 @@ class SnapshotClient(NamespacedClient):
 
     @_rewrite_parameters(
         body_fields=(
+            "expand_wildcards",
             "feature_states",
             "ignore_unavailable",
             "include_global_state",
@@ -175,6 +183,14 @@ class SnapshotClient(NamespacedClient):
         repository: str,
         snapshot: str,
         error_trace: t.Optional[bool] = None,
+        expand_wildcards: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]
+                ],
+                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
+            ]
+        ] = None,
         feature_states: t.Optional[t.Sequence[str]] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
@@ -195,15 +211,22 @@ class SnapshotClient(NamespacedClient):
           Take a snapshot of a cluster or of data streams and indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create>`_
 
-        :param repository: Repository for the snapshot.
-        :param snapshot: Name of the snapshot. Must be unique in the repository.
-        :param feature_states: Feature states to include in the snapshot. Each feature
+        :param repository: The name of the repository for the snapshot.
+        :param snapshot: The name of the snapshot. It supports date math. It must be
+            unique in the repository.
+        :param expand_wildcards: Determines how wildcard patterns in the `indices` parameter
+            match data streams and indices. It supports comma-separated values such as
+            `open,hidden`.
+        :param feature_states: The feature states to include in the snapshot. Each feature
             state includes one or more system indices containing related data. You can
             view a list of eligible features using the get features API. If `include_global_state`
             is `true`, all current feature states are included by default. If `include_global_state`
-            is `false`, no feature states are included by default.
+            is `false`, no feature states are included by default. Note that specifying
+            an empty array will result in the default behavior. To exclude all feature
+            states, regardless of the `include_global_state` value, specify an array
+            with only the value `none` (`["none"]`).
         :param ignore_unavailable: If `true`, the request ignores data streams and indices
             in `indices` that are missing or closed. If `false`, the request returns
             an error for any data stream or index that is missing or closed.
@@ -212,18 +235,24 @@ class SnapshotClient(NamespacedClient):
             composable index templates, legacy index templates, ingest pipelines, and
             ILM policies. It also includes data stored in system indices, such as Watches
             and task records (configurable via `feature_states`).
-        :param indices: Data streams and indices to include in the snapshot. Supports
-            multi-target syntax. Includes all data streams and indices by default.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param metadata: Optional metadata for the snapshot. May have any contents. Must
-            be less than 1024 bytes. This map is not automatically generated by Elasticsearch.
-        :param partial: If `true`, allows restoring a partial snapshot of indices with
-            unavailable shards. Only shards that were successfully included in the snapshot
-            will be restored. All missing shards will be recreated as empty. If `false`,
-            the entire restore operation will fail if one or more indices included in
-            the snapshot do not have all primary shards available.
+        :param indices: A comma-separated list of data streams and indices to include
+            in the snapshot. It supports a multi-target syntax. The default is an empty
+            array (`[]`), which includes all regular data streams and regular indices.
+            To exclude all data streams and indices, use `-*`. You can't use this parameter
+            to include or exclude system indices or system data streams from a snapshot.
+            Use `feature_states` instead.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param metadata: Arbitrary metadata for the snapshot, such as a record of who
+            took the snapshot, why it was taken, or any other useful data. It can have
+            any contents but it must be less than 1024 bytes. This information is not
+            automatically generated by Elasticsearch.
+        :param partial: If `true`, it enables you to restore a partial snapshot of indices
+            with unavailable shards. Only shards that were successfully included in the
+            snapshot will be restored. All missing shards will be recreated as empty.
+            If `false`, the entire restore operation will fail if one or more indices
+            included in the snapshot do not have all primary shards available.
         :param wait_for_completion: If `true`, the request returns a response when the
             snapshot is complete. If `false`, the request returns a response when the
             snapshot initializes.
@@ -252,6 +281,8 @@ class SnapshotClient(NamespacedClient):
         if wait_for_completion is not None:
             __query["wait_for_completion"] = wait_for_completion
         if not __body:
+            if expand_wildcards is not None:
+                __body["expand_wildcards"] = expand_wildcards
             if feature_states is not None:
                 __body["feature_states"] = feature_states
             if ignore_unavailable is not None:
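The new `expand_wildcards` body field controls how wildcard patterns in `indices` match data streams and indices. A sketch taking a blocking snapshot of hypothetical log indices:

# `client` is the assumed Elasticsearch instance from the earlier sketch.
client.snapshot.create(
    repository="my_repository",                    # hypothetical repository name
    snapshot="snapshot-2025-01-01",                # hypothetical snapshot name
    indices="logs-*",
    expand_wildcards="open,hidden",                # new body field in this release
    include_global_state=False,
    wait_for_completion=True,
)
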
@@ -303,15 +334,26 @@ class SnapshotClient(NamespacedClient):
           IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
           To register a snapshot repository, the cluster's global metadata must be writeable.
           Ensure there are no cluster blocks (for example, <code>cluster.blocks.read_only</code> and <code>cluster.blocks.read_only_allow_delete</code> settings) that prevent write access.</p>
+          <p>Several options for this API can be specified using a query parameter or a request body parameter.
+          If both parameters are specified, only the query parameter is used.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository>`_
 
-        :param name: A repository name
+        :param name: The name of the snapshot repository to register or update.
         :param repository:
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
-        :param verify: Whether to verify the repository after creation
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
+        :param verify: If `true`, the request verifies the repository is functional on
+            all master and data nodes in the cluster. If `false`, this verification is
+            skipped. You can also perform this verification with the verify snapshot
+            repository API.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -361,6 +403,7 @@ class SnapshotClient(NamespacedClient):
         human: t.Optional[bool] = None,
         master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
+        wait_for_completion: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -368,11 +411,17 @@ class SnapshotClient(NamespacedClient):
           <p>Delete snapshots.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete>`_
 
-        :param repository: A repository name
-        :param snapshot: A comma-separated list of snapshot names
-        :param master_timeout: Explicit operation timeout for connection to master node
+        :param repository: The name of the repository to delete a snapshot from.
+        :param snapshot: A comma-separated list of snapshot names to delete. It also
+            accepts wildcards (`*`).
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param wait_for_completion: If `true`, the request returns a response when the
+            matching snapshots are all deleted. If `false`, the request returns a response
+            as soon as the deletes are scheduled.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -394,6 +443,8 @@ class SnapshotClient(NamespacedClient):
             __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
+        if wait_for_completion is not None:
+            __query["wait_for_completion"] = wait_for_completion
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -424,12 +475,18 @@ class SnapshotClient(NamespacedClient):
           The snapshots themselves are left untouched and in place.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository>`_
 
-        :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns
-            are supported.
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param name: The name of the snapshot repositories to unregister. Wildcard (`*`)
+            patterns are supported.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -493,56 +550,94 @@ class SnapshotClient(NamespacedClient):
                 ],
             ]
         ] = None,
+        state: t.Optional[
+            t.Union[
+                t.Sequence[
+                    t.Union[
+                        str,
+                        t.Literal[
+                            "FAILED",
+                            "INCOMPATIBLE",
+                            "IN_PROGRESS",
+                            "PARTIAL",
+                            "SUCCESS",
+                        ],
+                    ]
+                ],
+                t.Union[
+                    str,
+                    t.Literal[
+                        "FAILED", "INCOMPATIBLE", "IN_PROGRESS", "PARTIAL", "SUCCESS"
+                    ],
+                ],
+            ]
+        ] = None,
         verbose: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
           <p>Get snapshot information.</p>
-
-
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html>`_
-
-        :param repository: Comma-separated list of snapshot repository names used to
-            limit the request. Wildcard (*) expressions are supported.
-        :param snapshot: Comma-separated list of snapshot names to retrieve. Also accepts
-            wildcards (*). - To get information about all snapshots in a registered repository,
-            use a wildcard (*) or _all. - To get information about any snapshots that
-            are currently running, use _current.
-        :param after: Offset identifier to start pagination from as returned by the next
-            field in the response body.
-        :param from_sort_value: Value of the current sort column at which to start retrieval.
-            Can either be a string snapshot- or repository name when sorting by snapshot
-            or repository name, a millisecond time value or a number when sorting by
-            index- or shard count.
-        :param ignore_unavailable: If false, the request returns an error for any snapshots
+          <p>NOTE: The <code>after</code> parameter and <code>next</code> field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.
+          It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.
+          Snapshots concurrently created may be seen during an iteration.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get>`_
+
+        :param repository: A comma-separated list of snapshot repository names used to
+            limit the request. Wildcard (`*`) expressions are supported.
+        :param snapshot: A comma-separated list of snapshot names to retrieve. Wildcards
+            (`*`) are supported. * To get information about all snapshots in a registered
+            repository, use a wildcard (`*`) or `_all`. * To get information about any
+            snapshots that are currently running, use `_current`.
+        :param after: An offset identifier to start pagination from as returned by the
+            next field in the response body.
+        :param from_sort_value: The value of the current sort column at which to start
+            retrieval. It can be a snapshot name or a repository name when sorting by
+            snapshot or repository name, or a millisecond time value or a number when
+            sorting by index count or shard count.
+        :param ignore_unavailable: If `false`, the request returns an error for any snapshots
             that are unavailable.
-        :param include_repository: If true, returns the repository name in each snapshot.
-        :param index_details: If true, returns additional information about each index
-            in the snapshot comprising the number of shards in the index, the total size
-            of the index in bytes, and the maximum number of segments per shard in the
-            index. Defaults to false, meaning that this information is omitted.
-        :param index_names: If true, returns the name of each index in each snapshot.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
+        :param include_repository: If `true`, the response includes the repository name
+            in each snapshot.
+        :param index_details: If `true`, the response includes additional information
+            about each index in the snapshot comprising the number of shards in the index,
+            the total size of the index in bytes, and the maximum number of segments
+            per shard in the index. The default is `false`, meaning that this information
+            is omitted.
+        :param index_names: If `true`, the response includes the name of each index in
+            each snapshot.
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
         :param offset: Numeric offset to start pagination from based on the snapshots
             matching this request. Using a non-zero value for this parameter is mutually
             exclusive with using the after parameter. Defaults to 0.
-        :param order: Sort order. Valid values are asc for ascending and desc for descending
-            order. Defaults to asc, meaning ascending order.
-        :param size: Maximum number of snapshots to return. Defaults to 0 which means
-            return all that match the request without limit.
-        :param slm_policy_filter: Filter snapshots by a comma-separated list of SLM policy
-            names that snapshots belong to. Also accepts wildcards (*) and combinations
-            of wildcards followed by exclude patterns starting with -. To include snapshots
-            not created by an SLM policy you can use the special pattern _none that will
-            match all snapshots without an SLM policy.
-        :param sort: Allows setting a sort order for the result. Defaults to start_time,
-            i.e. sorting by snapshot start time stamp.
-        :param verbose: If true, returns additional information about each snapshot such
-            as the version of Elasticsearch which took the snapshot, the start and end
-            times of the snapshot, and the number of shards snapshotted.
+        :param order: The sort order. Valid values are `asc` for ascending and `desc`
+            for descending order. The default behavior is ascending order.
+        :param size: The maximum number of snapshots to return. The default is 0, which
+            means to return all that match the request without limit.
+        :param slm_policy_filter: Filter snapshots by a comma-separated list of snapshot
+            lifecycle management (SLM) policy names that snapshots belong to. You can
+            use wildcards (`*`) and combinations of wildcards followed by exclude patterns
+            starting with `-`. For example, the pattern `*,-policy-a-\\*` will return
+            all snapshots except for those that were created by an SLM policy with a
+            name starting with `policy-a-`. Note that the wildcard pattern `*` matches
+            all snapshots created by an SLM policy but not those snapshots that were
+            not created by an SLM policy. To include snapshots that were not created
+            by an SLM policy, you can use the special pattern `_none` that will match
+            all snapshots without an SLM policy.
+        :param sort: The sort order for the result. The default behavior is sorting by
+            snapshot start time stamp.
+        :param state: Only return snapshots with a state found in the given comma-separated
+            list of snapshot states. The default is all snapshot states.
+        :param verbose: If `true`, returns additional information about each snapshot
+            such as the version of Elasticsearch which took the snapshot, the start and
+            end times of the snapshot, and the number of shards snapshotted. NOTE: The
+            parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`,
+            and `sort` are not supported when you set `verbose=false`, and the sort order
+            for requests with `verbose=false` is undefined.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -586,6 +681,8 @@ class SnapshotClient(NamespacedClient):
             __query["slm_policy_filter"] = slm_policy_filter
         if sort is not None:
             __query["sort"] = sort
+        if state is not None:
+            __query["state"] = state
         if verbose is not None:
             __query["verbose"] = verbose
         __headers = {"accept": "application/json"}
@@ -616,12 +713,18 @@ class SnapshotClient(NamespacedClient):
           <p>Get snapshot repository information.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository>`_
 
-        :param name: A comma-separated list of repository names
-        :param local: Return local information, do not retrieve the state from master
-            node (default: false)
-        :param master_timeout: Explicit operation timeout for connection to master node
+        :param name: A comma-separated list of snapshot repository names used to limit
+            the request. Wildcard (`*`) expressions are supported including combining
+            wildcards with exclude patterns starting with `-`. To get information about
+            all snapshot repositories registered in the cluster, omit this parameter
+            or use `*` or `_all`.
+        :param local: If `true`, the request gets information from the local node only.
+            If `false`, the request gets information from the master node.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
         """
         __path_parts: t.Dict[str, str]
         if name not in SKIP_IN_PATH:
@@ -678,20 +781,28 @@ class SnapshotClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Analyze a snapshot repository.
-          Analyze the performance characteristics and any incorrect behaviour found in a repository.</p>
-          <p>The response exposes implementation details of the analysis which may change from version to version.
-          The response body format is therefore not considered stable and may be different in newer versions.</p>
+          <p>Analyze a snapshot repository.</p>
+          <p>Performs operations on a snapshot repository in order to check for incorrect behaviour.</p>
           <p>There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch.
-          Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.</p>
+          Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do.
+          This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.</p>
           <p>The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations.
           Run your first analysis with the default parameter values to check for simple problems.
-          If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a <code>blob_count</code> of at least <code>2000</code>, a <code>max_blob_size</code> of at least <code>2gb</code>, a <code>max_total_data_size</code> of at least <code>1tb</code>, and a <code>register_operation_count</code> of at least <code>100</code>.
+          Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads.
+          If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a <code>blob_count</code> of at least <code>2000</code>, a <code>max_blob_size</code> of at least <code>2gb</code>, a <code>max_total_data_size</code> of at least <code>1tb</code>, and a <code>register_operation_count</code> of at least <code>100</code>.
           Always specify a generous timeout, possibly <code>1h</code> or longer, to allow time for each analysis to run to completion.
+          Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster.
           Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.</p>
           <p>If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly.
           This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support.
           If so, this storage system is not suitable for use as a snapshot repository.
+          Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system.
+          Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures.
+          However your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis.
+          You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using.
+          For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis.
+          This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch.
+          Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol.
           You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.</p>
           <p>If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took.
           You can use this information to determine the performance of your storage system.
@@ -719,14 +830,17 @@ class SnapshotClient(NamespacedClient):
           This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself.
           You must ensure this load does not affect other users of these systems.
           Analyses respect the repository settings <code>max_snapshot_bytes_per_sec</code> and <code>max_restore_bytes_per_sec</code> if available and the cluster setting <code>indices.recovery.max_bytes_per_sec</code> which you can use to limit the bandwidth they consume.</p>
-          <p>NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.</p>
+          <p>NOTE: This API is intended for exploratory use by humans.
+          You should expect the request parameters and the response format to vary in future versions.
+          The response exposes implementation details of the analysis which may change from version to version.</p>
           <p>NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones.
           A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version.
           This indicates it behaves incorrectly in ways that the former version did not detect.
           You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.</p>
           <p>NOTE: This API may not work correctly in a mixed-version cluster.</p>
           <p><em>Implementation details</em></p>
-          <p>NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.</p>
+          <p>NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions.
+          The request parameters and response format depend on details of the implementation so may also be different in newer versions.</p>
           <p>The analysis comprises a number of blob-level tasks, as set by the <code>blob_count</code> parameter and a number of compare-and-exchange operations on linearizable registers, as set by the <code>register_operation_count</code> parameter.
           These tasks are distributed over the data and master-eligible nodes in the cluster for execution.</p>
           <p>For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.
@@ -754,39 +868,44 @@ class SnapshotClient(NamespacedClient):
           Some operations also verify the behavior on small blobs with sizes other than 8 bytes.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/repo-analysis-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze>`_
 
         :param name: The name of the repository.
         :param blob_count: The total number of blobs to write to the repository during
-            the test. For realistic experiments, you should set it to at least `2000`.
+            the test. For realistic experiments, set this parameter to at least `2000`.
         :param concurrency: The number of operations to run concurrently during the test.
+            For realistic experiments, leave this parameter unset.
         :param detailed: Indicates whether to return detailed results, including timing
             information for every operation performed during the analysis. If false,
             it returns only a summary of the analysis.
         :param early_read_node_count: The number of nodes on which to perform an early
             read operation while writing each blob. Early read operations are only rarely
-            performed.
+            performed. For realistic experiments, leave this parameter unset.
         :param max_blob_size: The maximum size of a blob to be written during the test.
-            For realistic experiments, you should set it to at least `2gb`.
+            For realistic experiments, set this parameter to at least `2gb`.
         :param max_total_data_size: An upper limit on the total size of all the blobs
-            written during the test. For realistic experiments, you should set it to
+            written during the test. For realistic experiments, set this parameter to
             at least `1tb`.
         :param rare_action_probability: The probability of performing a rare action such
-            as an early read, an overwrite, or an aborted write on each blob.
+            as an early read, an overwrite, or an aborted write on each blob. For realistic
+            experiments, leave this parameter unset.
         :param rarely_abort_writes: Indicates whether to rarely cancel writes before
-            they complete.
+            they complete. For realistic experiments, leave this parameter unset.
         :param read_node_count: The number of nodes on which to read a blob after writing.
+            For realistic experiments, leave this parameter unset.
         :param register_operation_count: The minimum number of linearizable register
-            operations to perform in total. For realistic experiments, you should set
-            it to at least `100`.
+            operations to perform in total. For realistic experiments, set this parameter
+            to at least `100`.
         :param seed: The seed for the pseudo-random number generator used to generate
             the list of operations performed during the test. To repeat the same set
             of operations in multiple experiments, use the same seed in each experiment.
             Note that the operations are performed concurrently so might not always happen
-            in the same order on each run.
+            in the same order on each run. For realistic experiments, leave this parameter
+            unset.
         :param timeout: The period of time to wait for the test to complete. If no response
             is received before the timeout expires, the test is cancelled and returns
-            an error.
+            an error. For realistic experiments, set this parameter sufficiently long
+            to allow the test to complete.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -879,21 +998,39 @@ class SnapshotClient(NamespacedClient):
           It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.</p>
           <p>NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.</p>
           <p>NOTE: This API may not work correctly in a mixed-version cluster.</p>
+          <p>The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster.
+          For instance, by default it will only use at most half of the <code>snapshot_meta</code> threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool.
+          If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster.
+          For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.</p>
+          <p>The response exposes implementation details of the analysis which may change from version to version.
+          The response body format is therefore not considered stable and may be different in newer versions.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity>`_
 
-        :param name: A repository name
-        :param blob_thread_pool_concurrency: Number of threads to use for reading blob
-            contents
-        :param index_snapshot_verification_concurrency: Number of snapshots to verify
-            concurrently within each index
-        :param index_verification_concurrency: Number of indices to verify concurrently
-        :param max_bytes_per_sec: Rate limit for individual blob verification
-        :param max_failed_shard_snapshots: Maximum permitted number of failed shard snapshots
-        :param meta_thread_pool_concurrency: Number of threads to use for reading metadata
-        :param snapshot_verification_concurrency: Number of snapshots to verify concurrently
-        :param verify_blob_contents: Whether to verify the contents of individual blobs
+        :param name: The name of the snapshot repository.
+        :param blob_thread_pool_concurrency: If `verify_blob_contents` is `true`, this
+            parameter specifies how many blobs to verify at once.
+        :param index_snapshot_verification_concurrency: The maximum number of index snapshots
+            to verify concurrently within each index verification.
+        :param index_verification_concurrency: The number of indices to verify concurrently.
+            The default behavior is to use the entire `snapshot_meta` thread pool.
+        :param max_bytes_per_sec: If `verify_blob_contents` is `true`, this parameter
+            specifies the maximum amount of data that Elasticsearch will read from the
+            repository every second.
+        :param max_failed_shard_snapshots: The number of shard snapshot failures to track
+            during integrity verification, in order to avoid excessive resource usage.
+            If your repository contains more than this number of shard snapshot failures,
+            the verification will fail.
+        :param meta_thread_pool_concurrency: The maximum number of snapshot metadata
+            operations to run concurrently. The default behavior is to use at most half
+            of the `snapshot_meta` thread pool at once.
+        :param snapshot_verification_concurrency: The number of snapshots to verify concurrently.
+            The default behavior is to use at most half of the `snapshot_meta` thread
+            pool at once.
+        :param verify_blob_contents: Indicates whether to verify the checksum of every
+            data blob in the repository. If this feature is enabled, Elasticsearch will
+            read the entire repository contents, which may be extremely slow and expensive.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
@@ -991,23 +1128,66 @@ class SnapshotClient(NamespacedClient):
           <p>If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore>`_
 
-        :param repository: A repository name
-        :param snapshot: A snapshot name
-        :param feature_states:
-        :param ignore_index_settings:
-        :param ignore_unavailable:
-        :param include_aliases:
-        :param include_global_state:
-        :param index_settings:
-        :param indices:
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param partial:
-        :param rename_pattern:
-        :param rename_replacement:
-        :param wait_for_completion: Should this request wait until the operation has
-            completed before returning
+        :param repository: The name of the repository to restore a snapshot from.
+        :param snapshot: The name of the snapshot to restore.
+        :param feature_states: The feature states to restore. If `include_global_state`
+            is `true`, the request restores all feature states in the snapshot by default.
+            If `include_global_state` is `false`, the request restores no feature states
+            by default. Note that specifying an empty array will result in the default
+            behavior. To restore no feature states, regardless of the `include_global_state`
+            value, specify an array containing only the value `none` (`["none"]`).
+        :param ignore_index_settings: The index settings to not restore from the snapshot.
+            You can't use this option to ignore `index.number_of_shards`. For data streams,
+            this option applies only to restored backing indices. New backing indices
+            are configured using the data stream's matching index template.
+        :param ignore_unavailable: If `true`, the request ignores any index or data stream
+            in indices that's missing from the snapshot. If `false`, the request returns
+            an error for any missing index or data stream.
+        :param include_aliases: If `true`, the request restores aliases for any restored
+            data streams and indices. If `false`, the request doesn’t restore aliases.
+        :param include_global_state: If `true`, restore the cluster state. The cluster
+            state includes: * Persistent cluster settings * Index templates * Legacy
+            index templates * Ingest pipelines * Index lifecycle management (ILM) policies
+            * Stored scripts * For snapshots taken after 7.12.0, feature states. If `include_global_state`
+            is `true`, the restore operation merges the legacy index templates in your
+            cluster with the templates contained in the snapshot, replacing any existing
+            ones whose name matches one in the snapshot. It completely removes all persistent
+            settings, non-legacy index templates, ingest pipelines, and ILM lifecycle
+            policies that exist in your cluster and replaces them with the corresponding
+            items from the snapshot. Use the `feature_states` parameter to configure
+            how feature states are restored. If `include_global_state` is `true` and
+            a snapshot was created without a global state then the restore request will
+            fail.
+        :param index_settings: Index settings to add or change in restored indices, including
+            backing indices. You can't use this option to change `index.number_of_shards`.
+            For data streams, this option applies only to restored backing indices. New
+            backing indices are configured using the data stream's matching index template.
+        :param indices: A comma-separated list of indices and data streams to restore.
+            It supports a multi-target syntax. The default behavior is all regular indices
+            and regular data streams in the snapshot. You can't use this parameter to
+            restore system indices or system data streams. Use `feature_states` instead.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param partial: If `false`, the entire restore operation will fail if one or
+            more indices included in the snapshot do not have all primary shards available.
+            If `true`, it allows restoring a partial snapshot of indices with unavailable
+            shards. Only shards that were successfully included in the snapshot will
+            be restored. All missing shards will be recreated as empty.
+        :param rename_pattern: A rename pattern to apply to restored data streams and
+            indices. Data streams and indices matching the rename pattern will be renamed
+            according to `rename_replacement`. The rename pattern is applied as defined
+            by the regular expression that supports referencing the original text, according
+            to the `appendReplacement` logic.
+        :param rename_replacement: The rename replacement string that is used with the
+            `rename_pattern`.
+        :param wait_for_completion: If `true`, the request returns a response when the
+            restore operation completes. The operation is complete when it finishes all
+            attempts to recover primary shards for restored indices. This applies even
+            if one or more of the recovery attempts fail. If `false`, the request returns
+            a response when the restore operation initializes.
         """
         if repository in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'repository'")
@@ -1085,9 +1265,17 @@ class SnapshotClient(NamespacedClient):
         .. raw:: html
 
           <p>Get the snapshot status.
-          Get a detailed description of the current state for each shard participating in the snapshot.
-          Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
+          Get a detailed description of the current state for each shard participating in the snapshot.</p>
+          <p>Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
           If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.</p>
+          <p>If you omit the <code>&lt;snapshot&gt;</code> request path parameter, the request retrieves information only for currently running snapshots.
+          This usage is preferred.
+          If needed, you can specify <code>&lt;repository&gt;</code> and <code>&lt;snapshot&gt;</code> to retrieve information for specific snapshots, even if they're not currently running.</p>
+          <p>Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster.
+          Loading the stats from the repository is an expensive operation (see the WARNING below).
+          Therefore the stats values for such shards will be -1 even though the &quot;stage&quot; value will be &quot;DONE&quot;, in order to minimize latency.
+          A &quot;description&quot; field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid.
+          Consequently, the total stats for the index will be less than expected due to the missing values from these shards.</p>
           <p>WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
           The API requires a read from the repository for each shard in each snapshot.
           For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).</p>
@@ -1095,13 +1283,18 @@ class SnapshotClient(NamespacedClient):
           These requests can also tax machine resources and, when using cloud storage, incur high processing costs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status>`_
 
-        :param repository: A repository name
-        :param snapshot: A comma-separated list of snapshot names
-        :param ignore_unavailable: Whether to ignore unavailable snapshots, defaults
-            to false which means a SnapshotMissingException is thrown
-        :param master_timeout: Explicit operation timeout for connection to master node
+        :param repository: The snapshot repository name used to limit the request. It
+            supports wildcards (`*`) if `<snapshot>` isn't specified.
+        :param snapshot: A comma-separated list of snapshots to retrieve status for.
+            The default is currently running snapshots. Wildcards (`*`) are not supported.
+        :param ignore_unavailable: If `false`, the request returns an error for any snapshots
+            that are unavailable. If `true`, the request ignores snapshots that are unavailable,
+            such as those that are corrupted or temporarily cannot be returned.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
         """
         __path_parts: t.Dict[str, str]
         if repository not in SKIP_IN_PATH and snapshot not in SKIP_IN_PATH:
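
Per the guidance above, the preferred usage is to omit the repository and snapshot parameters so that only currently running snapshots are inspected; a minimal sketch:

    # Detailed shard-level progress for snapshots that are currently running.
    running = client.snapshot.status()
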
@@ -1158,11 +1351,17 @@ class SnapshotClient(NamespacedClient):
           Check for common misconfigurations in a snapshot repository.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository>`_
 
-        :param name: A repository name
-        :param master_timeout: Explicit operation timeout for connection to master node
-        :param timeout: Explicit operation timeout
+        :param name: The name of the snapshot repository to verify.
+        :param master_timeout: The period to wait for the master node. If the master
+            node is not available before the timeout expires, the request fails and returns
+            an error. To indicate that the request should never timeout, set it to `-1`.
+        :param timeout: The period to wait for a response from all relevant nodes in
+            the cluster after updating the cluster metadata. If no response is received
+            before the timeout expires, the cluster metadata update still applies but
+            the response will indicate that it was not completely acknowledged. To indicate
+            that the request should never timeout, set it to `-1`.
         """
         if name in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'name'")
diff -pruN 8.17.2-2/elasticsearch/_sync/client/sql.py 9.2.0-1/elasticsearch/_sync/client/sql.py
--- 8.17.2-2/elasticsearch/_sync/client/sql.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/sql.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,7 +44,7 @@ class SqlClient(NamespacedClient):
           <p>Clear an SQL search cursor.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clear-sql-cursor-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor>`_
 
         :param cursor: Cursor to clear.
         """
@@ -99,7 +99,7 @@ class SqlClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-async-sql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async>`_
 
         :param id: The identifier for the search.
         """
@@ -150,7 +150,7 @@ class SqlClient(NamespacedClient):
           <p>If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async>`_
 
         :param id: The identifier for the search.
         :param delimiter: The separator for CSV results. The API supports this parameter
@@ -212,7 +212,7 @@ class SqlClient(NamespacedClient):
           Get the current status of an async SQL search or a stored synchronous SQL search.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-status-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status>`_
 
         :param id: The identifier for the search.
         """
@@ -283,8 +283,9 @@ class SqlClient(NamespacedClient):
         keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         keep_on_completion: t.Optional[bool] = None,
         page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
-        params: t.Optional[t.Mapping[str, t.Any]] = None,
+        params: t.Optional[t.Sequence[t.Any]] = None,
         pretty: t.Optional[bool] = None,
+        project_routing: t.Optional[str] = None,
         query: t.Optional[str] = None,
         request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None,
@@ -301,7 +302,7 @@ class SqlClient(NamespacedClient):
           Run an SQL request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-search-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query>`_
 
         :param allow_partial_search_results: If `true`, the response has partial results
             when there are shard request timeouts or shard failures. If `false`, the
@@ -332,6 +333,10 @@ class SqlClient(NamespacedClient):
             is no longer available. Subsequent scroll requests prolong the lifetime of
             the scroll cursor by the duration of `page_timeout` in the scroll request.
         :param params: The values for parameters in the query.
+        :param project_routing: Specifies a subset of projects to target for the search,
+            using project metadata tags in a subset of Lucene query syntax. Allowed Lucene
+            queries: the `_alias` tag and a single value (possibly wildcarded). Examples:
+            `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
         :param query: The SQL query to run.
         :param request_timeout: The timeout before the request fails.
         :param runtime_mappings: One or more runtime fields for the search request. These
@@ -357,6 +362,8 @@ class SqlClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if project_routing is not None:
+            __query["project_routing"] = project_routing
         if not __body:
             if allow_partial_search_results is not None:
                 __body["allow_partial_search_results"] = allow_partial_search_results
@@ -427,7 +434,7 @@ class SqlClient(NamespacedClient):
           It accepts the same request body parameters as the SQL search API, excluding <code>cursor</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-translate-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate>`_
 
         :param query: The SQL query to run.
         :param fetch_size: The maximum number of rows (or entries) to return in one response.
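
Since params is now typed as a sequence rather than a mapping, positional "?" placeholders are the expected style. A minimal sketch, assuming an index named "library" exists:

    response = client.sql.query(
        query="SELECT author, COUNT(*) FROM library WHERE page_count > ? GROUP BY author",
        params=[500],
        fetch_size=100,
    )
    for row in response["rows"]:
        print(row)
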
diff -pruN 8.17.2-2/elasticsearch/_sync/client/ssl.py 9.2.0-1/elasticsearch/_sync/client/ssl.py
--- 8.17.2-2/elasticsearch/_sync/client/ssl.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/ssl.py	2025-10-28 16:50:53.000000000 +0000
@@ -52,7 +52,7 @@ class SslClient(NamespacedClient):
           <p>If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/security-api-ssl.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates>`_
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_ssl/certificates"
diff -pruN 8.17.2-2/elasticsearch/_sync/client/streams.py 9.2.0-1/elasticsearch/_sync/client/streams.py
--- 8.17.2-2/elasticsearch/_sync/client/streams.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/streams.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,185 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+import typing as t
+
+from elastic_transport import ObjectApiResponse, TextApiResponse
+
+from ._base import NamespacedClient
+from .utils import (
+    Stability,
+    _rewrite_parameters,
+    _stability_warning,
+)
+
+
+class StreamsClient(NamespacedClient):
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def logs_disable(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
+        """
+        .. raw:: html
+
+          <p>Disable logs stream.</p>
+          <p>Turn off the logs stream feature for this cluster.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch#TODO>`_
+
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_streams/logs/_disable"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json,text/plain"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="streams.logs_disable",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def logs_enable(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
+        """
+        .. raw:: html
+
+          <p>Enable logs stream.</p>
+          <p>Turn on the logs stream feature for this cluster.</p>
+          <p>NOTE: To protect existing data, this feature can be turned on only if the
+          cluster does not have existing indices or data streams that match the pattern <code>logs|logs.*</code>.
+          If those indices or data streams exist, a <code>409 - Conflict</code> response and an error are returned.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch#TODO>`_
+
+        :param master_timeout: The period to wait for a connection to the master node.
+            If no response is received before the timeout expires, the request fails
+            and returns an error.
+        :param timeout: The period to wait for a response. If no response is received
+            before the timeout expires, the request fails and returns an error.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_streams/logs/_enable"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json,text/plain"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="streams.logs_enable",
+            path_parts=__path_parts,
+        )
+
+    @_rewrite_parameters()
+    @_stability_warning(Stability.EXPERIMENTAL)
+    def status(
+        self,
+        *,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+        pretty: t.Optional[bool] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Get the status of streams.</p>
+          <p>Get the current status for all types of streams.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch#TODO>`_
+
+        :param master_timeout: Period to wait for a connection to the master node. If
+            no response is received before the timeout expires, the request fails and
+            returns an error.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_streams/status"
+        __query: t.Dict[str, t.Any] = {}
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
+        if pretty is not None:
+            __query["pretty"] = pretty
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "GET",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="streams.status",
+            path_parts=__path_parts,
+        )
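
The new StreamsClient methods are experimental; a minimal sketch of calling them ("client" is an elasticsearch.Elasticsearch instance):

    # Turn the logs stream feature on, then check the reported status.
    client.streams.logs_enable(timeout="30s")
    print(client.streams.status())

    # Turn it back off when finished experimenting.
    client.streams.logs_disable(timeout="30s")
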
diff -pruN 8.17.2-2/elasticsearch/_sync/client/synonyms.py 9.2.0-1/elasticsearch/_sync/client/synonyms.py
--- 8.17.2-2/elasticsearch/_sync/client/synonyms.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/synonyms.py	2025-10-28 16:50:53.000000000 +0000
@@ -53,7 +53,7 @@ class SynonymsClient(NamespacedClient):
           When the synonyms set is not used in analyzers, you will be able to delete it.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym>`_
 
         :param id: The synonyms set identifier to delete.
         """
@@ -90,6 +90,7 @@ class SynonymsClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
@@ -98,10 +99,13 @@ class SynonymsClient(NamespacedClient):
           Delete a synonym rule from a synonym set.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-synonym-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule>`_
 
         :param set_id: The ID of the synonym set to update.
         :param rule_id: The ID of the synonym rule to delete.
+        :param refresh: If `true`, the request will refresh the analyzers with the deleted
+            synonym rule and wait for the new synonyms to be available before returning.
+            If `false`, analyzers will not be reloaded with the deleted synonym rule.
         """
         if set_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'set_id'")
@@ -121,6 +125,8 @@ class SynonymsClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "DELETE",
@@ -151,7 +157,7 @@ class SynonymsClient(NamespacedClient):
           <p>Get a synonym set.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym>`_
 
         :param id: The synonyms set identifier to retrieve.
         :param from_: The starting offset for query rules to retrieve.
@@ -202,7 +208,7 @@ class SynonymsClient(NamespacedClient):
           Get a synonym rule from a synonym set.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonym-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule>`_
 
         :param set_id: The ID of the synonym set to retrieve the synonym rule from.
         :param rule_id: The ID of the synonym rule to retrieve.
@@ -255,7 +261,7 @@ class SynonymsClient(NamespacedClient):
           Get a summary of all defined synonym sets.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym>`_
 
         :param from_: The starting offset for synonyms sets to retrieve.
         :param size: The maximum number of synonyms sets to retrieve.
@@ -299,6 +305,7 @@ class SynonymsClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -309,12 +316,16 @@ class SynonymsClient(NamespacedClient):
           If you need to manage more synonym rules, you can create multiple synonym sets.</p>
           <p>When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.
           This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.</p>
+          <p>For practical examples of how to create or update a synonyms set, refer to the External documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-synonyms-set.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym>`_
 
         :param id: The ID of the synonyms set to be created or updated.
         :param synonyms_set: The synonym rules definitions for the synonyms set.
+        :param refresh: If `true`, the request will refresh the analyzers with the new
+            synonyms set and wait for the new synonyms to be available before returning.
+            If `false`, analyzers will not be reloaded with the new synonym set.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
@@ -332,6 +343,8 @@ class SynonymsClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         if not __body:
             if synonyms_set is not None:
                 __body["synonyms_set"] = synonyms_set
@@ -359,6 +372,7 @@ class SynonymsClient(NamespacedClient):
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        refresh: t.Optional[bool] = None,
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -370,12 +384,15 @@ class SynonymsClient(NamespacedClient):
           <p>When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-synonym-rule.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule>`_
 
         :param set_id: The ID of the synonym set.
         :param rule_id: The ID of the synonym rule to be updated or created.
         :param synonyms: The synonym rule information definition, which must be in Solr
             format.
+        :param refresh: If `true`, the request will refresh the analyzers with the new
+            synonym rule and wait for the new synonyms to be available before returning.
+            If `false`, analyzers will not be reloaded with the new synonym rule.
         """
         if set_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'set_id'")
@@ -398,6 +415,8 @@ class SynonymsClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
         if not __body:
             if synonyms is not None:
                 __body["synonyms"] = synonyms
diff -pruN 8.17.2-2/elasticsearch/_sync/client/tasks.py 9.2.0-1/elasticsearch/_sync/client/tasks.py
--- 8.17.2-2/elasticsearch/_sync/client/tasks.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/tasks.py	2025-10-28 16:50:53.000000000 +0000
@@ -36,7 +36,7 @@ class TasksClient(NamespacedClient):
     def cancel(
         self,
         *,
-        task_id: t.Optional[t.Union[int, str]] = None,
+        task_id: t.Optional[str] = None,
         actions: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
@@ -60,7 +60,7 @@ class TasksClient(NamespacedClient):
           You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks>`_
 
         :param task_id: The task identifier.
         :param actions: A comma-separated list or wildcard expression of actions that
@@ -128,7 +128,7 @@ class TasksClient(NamespacedClient):
           <p>If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks>`_
 
         :param task_id: The task identifier.
         :param timeout: The period to wait for a response. If no response is received
@@ -176,7 +176,6 @@ class TasksClient(NamespacedClient):
             t.Union[str, t.Literal["nodes", "none", "parents"]]
         ] = None,
         human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         parent_task_id: t.Optional[str] = None,
         pretty: t.Optional[bool] = None,
@@ -239,7 +238,7 @@ class TasksClient(NamespacedClient):
           The <code>X-Opaque-Id</code> in the children <code>headers</code> is the child task of the task that was initiated by the REST request.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/tasks.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks>`_
 
         :param actions: A comma-separated list or wildcard expression of actions used
             to limit the request. For example, you can use `cluster:*` to retrieve all
@@ -249,9 +248,6 @@ class TasksClient(NamespacedClient):
             other but is more costly to run.
         :param group_by: A key that is used to group tasks in the response. The task
             lists can be grouped either by nodes or by parent tasks.
-        :param master_timeout: The period to wait for a connection to the master node.
-            If no response is received before the timeout expires, the request fails
-            and returns an error.
         :param nodes: A comma-separated list of node IDs or names that is used to limit
             the returned information.
         :param parent_task_id: A parent task identifier that is used to limit returned
@@ -278,8 +274,6 @@ class TasksClient(NamespacedClient):
             __query["group_by"] = group_by
         if human is not None:
             __query["human"] = human
-        if master_timeout is not None:
-            __query["master_timeout"] = master_timeout
         if nodes is not None:
             __query["nodes"] = nodes
         if parent_task_id is not None:
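
For context on the signature changes above (cancel() now types task_id as a plain string, and list() no longer accepts master_timeout), a hedged sketch of typical task-management calls; the endpoint and task ID are placeholders:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder

# List detailed reindex tasks grouped by parent task; master_timeout is no longer a parameter here.
tasks = client.tasks.list(actions="*reindex", group_by="parents", detailed=True)

# Cancel a task; task IDs are strings of the form "<node_id>:<task_number>".
client.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345")
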
diff -pruN 8.17.2-2/elasticsearch/_sync/client/text_structure.py 9.2.0-1/elasticsearch/_sync/client/text_structure.py
--- 8.17.2-2/elasticsearch/_sync/client/text_structure.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/text_structure.py	2025-10-28 16:50:53.000000000 +0000
@@ -72,7 +72,7 @@ class TextStructureClient(NamespacedClie
           It helps determine why the returned structure was chosen.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-field-structure.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure>`_
 
         :param field: The field that should be analyzed.
         :param index: The name of the index that contains the analyzed field.
@@ -259,7 +259,7 @@ class TextStructureClient(NamespacedClie
           It helps determine why the returned structure was chosen.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-message-structure.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure>`_
 
         :param messages: The list of messages you want to analyze.
         :param column_names: If the format is `delimited`, you can specify the column
@@ -433,7 +433,7 @@ class TextStructureClient(NamespacedClie
           However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/find-structure.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure>`_
 
         :param text_files:
         :param charset: The text's character set. It must be a character set that is
@@ -620,7 +620,7 @@ class TextStructureClient(NamespacedClie
           The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/test-grok-pattern.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern>`_
 
         :param grok_pattern: The Grok pattern to run on the text.
         :param text: The lines of text to run the Grok pattern on.
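
As a quick illustration of the test_grok_pattern endpoint referenced above, a sketch with an invented pattern and log line; the "matches" response field is taken from the server API and should be treated as an assumption here:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder

resp = client.text_structure.test_grok_pattern(
    grok_pattern="%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}",
    text=["2025-10-28T16:50:53Z INFO node started"],
)
# Each entry reports whether the line matched and the offsets/lengths of the captured substrings.
print(resp["matches"])
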
diff -pruN 8.17.2-2/elasticsearch/_sync/client/transform.py 9.2.0-1/elasticsearch/_sync/client/transform.py
--- 8.17.2-2/elasticsearch/_sync/client/transform.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/transform.py	2025-10-28 16:50:53.000000000 +0000
@@ -41,11 +41,10 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Delete a transform.
-          Deletes a transform.</p>
+          <p>Delete a transform.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param delete_dest_index: If this value is true, the destination index is deleted
@@ -106,10 +105,10 @@ class TransformClient(NamespacedClient):
         .. raw:: html
 
           <p>Get transforms.
-          Retrieves configuration information for transforms.</p>
+          Get configuration information for transforms.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform>`_
 
         :param transform_id: Identifier for the transform. It can be a transform identifier
             or a wildcard expression. You can get information for all transforms by using
@@ -178,11 +177,11 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Get transform stats.
-          Retrieves usage information for transforms.</p>
+          <p>Get transform stats.</p>
+          <p>Get usage information for transforms.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-transform-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats>`_
 
         :param transform_id: Identifier for the transform. It can be a transform identifier
             or a wildcard expression. You can get information for all transforms by using
@@ -270,7 +269,7 @@ class TransformClient(NamespacedClient):
           types of the source index and the transform aggregations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/preview-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform>`_
 
         :param transform_id: Identifier for the transform to preview. If you specify
             this path parameter, you cannot provide transform configuration details in
@@ -407,7 +406,7 @@ class TransformClient(NamespacedClient):
           give users any privileges on <code>.data-frame-internal*</code> indices.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/put-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform>`_
 
         :param transform_id: Identifier for the transform. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -503,17 +502,17 @@ class TransformClient(NamespacedClient):
         force: t.Optional[bool] = None,
         human: t.Optional[bool] = None,
         pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
         .. raw:: html
 
-          <p>Reset a transform.
-          Resets a transform.
-          Before you can reset it, you must stop it; alternatively, use the <code>force</code> query parameter.
+          <p>Reset a transform.</p>
+          <p>Before you can reset it, you must stop it; alternatively, use the <code>force</code> query parameter.
           If the destination index was created by the transform, it is deleted.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/reset-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform>`_
 
         :param transform_id: Identifier for the transform. This identifier can contain
             lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
@@ -521,6 +520,8 @@ class TransformClient(NamespacedClient):
         :param force: If this value is `true`, the transform is reset regardless of its
             current state. If it's `false`, the transform must be stopped before it can
             be reset.
+        :param timeout: Period to wait for a response. If no response is received before
+            the timeout expires, the request fails and returns an error.
         """
         if transform_id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'transform_id'")
@@ -537,6 +538,8 @@ class TransformClient(NamespacedClient):
             __query["human"] = human
         if pretty is not None:
             __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
         __headers = {"accept": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "POST",
@@ -561,15 +564,15 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Schedule a transform to start now.
-          Instantly runs a transform to process data.</p>
-          <p>If you _schedule_now a transform, it will process the new data instantly,
-          without waiting for the configured frequency interval. After _schedule_now API is called,
-          the transform will be processed again at now + frequency unless _schedule_now API
+          <p>Schedule a transform to start now.</p>
+          <p>Instantly run a transform to process data.
+          If you run this API, the transform will process the new data instantly,
+          without waiting for the configured frequency interval. After the API is called,
+          the transform will be processed again at <code>now + frequency</code> unless the API
           is called again in the meantime.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/schedule-now-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param timeout: Controls the time to wait for the scheduling to take place
@@ -599,6 +602,66 @@ class TransformClient(NamespacedClient):
             path_parts=__path_parts,
         )
 
+    @_rewrite_parameters()
+    def set_upgrade_mode(
+        self,
+        *,
+        enabled: t.Optional[bool] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        pretty: t.Optional[bool] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        .. raw:: html
+
+          <p>Set upgrade_mode for transform indices.
+          Sets a cluster wide upgrade_mode setting that prepares transform
+          indices for an upgrade.
+          When upgrading your cluster, in some circumstances you must restart your
+          nodes and reindex your transform indices. In those circumstances,
+          there must be no transforms running. You can stop the transforms,
+          do the upgrade, then start all the transforms again. Alternatively,
+          you can use this API to temporarily halt tasks associated with the transforms
+          and prevent new transforms from starting. You can also use this API
+          during upgrades that do not require you to reindex your transform
+          indices, though stopping transforms is not a requirement in that case.
+          You can see the current value for the upgrade_mode setting by using the get
+          transform info API.</p>
+
+
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode>`_
+
+        :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts
+            all transform tasks and prohibits new transform tasks from starting.
+        :param timeout: The time to wait for the request to be completed.
+        """
+        __path_parts: t.Dict[str, str] = {}
+        __path = "/_transform/set_upgrade_mode"
+        __query: t.Dict[str, t.Any] = {}
+        if enabled is not None:
+            __query["enabled"] = enabled
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="transform.set_upgrade_mode",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters(
         parameter_aliases={"from": "from_"},
     )
@@ -616,8 +679,7 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Start a transform.
-          Starts a transform.</p>
+          <p>Start a transform.</p>
           <p>When you start a transform, it creates the destination index if it does not already exist. The <code>number_of_shards</code> is
           set to <code>1</code> and the <code>auto_expand_replicas</code> is set to <code>0-1</code>. If it is a pivot transform, it deduces the mapping
           definitions for the destination index from the source indices and the transform aggregations. If fields in the
@@ -633,7 +695,7 @@ class TransformClient(NamespacedClient):
           destination indices, the transform fails when it attempts unauthorized operations.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/start-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param from_: Restricts the set of transformed entities to those changed after
@@ -691,7 +753,7 @@ class TransformClient(NamespacedClient):
           Stops one or more transforms.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/stop-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform>`_
 
         :param transform_id: Identifier for the transform. To stop multiple transforms,
             use a comma-separated list or a wildcard expression. To stop all transforms,
@@ -793,7 +855,7 @@ class TransformClient(NamespacedClient):
           time of update and runs with those privileges.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/update-transform.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform>`_
 
         :param transform_id: Identifier for the transform.
         :param defer_validation: When true, deferrable validations are not run. This
@@ -874,8 +936,8 @@ class TransformClient(NamespacedClient):
         """
         .. raw:: html
 
-          <p>Upgrade all transforms.
-          Transforms are compatible across minor versions and between supported major versions.
+          <p>Upgrade all transforms.</p>
+          <p>Transforms are compatible across minor versions and between supported major versions.
           However, over time, the format of transform configuration information may change.
           This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
           It also cleans up the internal data structures that store the transform state and checkpoints.
@@ -888,7 +950,7 @@ class TransformClient(NamespacedClient):
           You may want to perform a recent cluster backup prior to the upgrade.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/upgrade-transforms.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms>`_
 
         :param dry_run: When true, the request checks for updates but does not run them.
         :param timeout: Period to wait for a response. If no response is received before
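
The new set_upgrade_mode API above can bracket a rolling upgrade, and the reset call now accepts a timeout. A hedged sketch; the transform ID, timeouts, and the reset_transform method name (not shown in the hunks above) are assumptions:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder

# Halt all transform tasks and block new ones before the upgrade.
client.transform.set_upgrade_mode(enabled=True, timeout="60s")

# ... upgrade the cluster ...

# Re-enable transforms once the upgrade is complete.
client.transform.set_upgrade_mode(enabled=False)

# The reset call now also accepts the timeout parameter added in this release.
client.transform.reset_transform(transform_id="ecommerce-transform", timeout="30s")
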
diff -pruN 8.17.2-2/elasticsearch/_sync/client/utils.py 9.2.0-1/elasticsearch/_sync/client/utils.py
--- 8.17.2-2/elasticsearch/_sync/client/utils.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/utils.py	2025-10-28 16:50:53.000000000 +0000
@@ -134,7 +134,6 @@ def client_node_configs(
 
     def apply_node_options(node_config: NodeConfig) -> NodeConfig:
         """Needs special handling of headers since .replace() wipes out existing headers"""
-        nonlocal node_options
         headers = node_config.headers.copy()  # type: ignore[attr-defined]
 
         headers_to_add = node_options.pop("headers", ())
@@ -182,8 +181,6 @@ def host_mapping_to_node_config(host: Ma
         "host",
         "port",
         "path_prefix",
-        "url_prefix",
-        "use_ssl",
     }
     disallowed_keys = set(host.keys()).difference(allow_hosts_keys)
     if disallowed_keys:
@@ -197,41 +194,6 @@ def host_mapping_to_node_config(host: Ma
 
     options = dict(host)
 
-    # Handle the deprecated option 'use_ssl'
-    if "use_ssl" in options:
-        use_ssl = options.pop("use_ssl")
-        if not isinstance(use_ssl, bool):
-            raise TypeError("'use_ssl' must be of type 'bool'")
-
-        # Ensure the user isn't specifying scheme=http use_ssl=True or vice-versa
-        if "scheme" in options and (options["scheme"] == "https") != use_ssl:
-            raise ValueError(
-                f"Cannot specify conflicting options 'scheme={options['scheme']}' "
-                f"and 'use_ssl={use_ssl}'. Use 'scheme' only instead"
-            )
-
-        warnings.warn(
-            "The 'use_ssl' option is no longer needed as specifying a 'scheme' is now required",
-            category=DeprecationWarning,
-            stacklevel=warn_stacklevel(),
-        )
-        options.setdefault("scheme", "https" if use_ssl else "http")
-
-    # Handle the deprecated option 'url_prefix'
-    if "url_prefix" in options:
-        if "path_prefix" in options:
-            raise ValueError(
-                "Cannot specify conflicting options 'url_prefix' and "
-                "'path_prefix'. Use 'path_prefix' only instead"
-            )
-
-        warnings.warn(
-            "The 'url_prefix' option is deprecated in favor of 'path_prefix'",
-            category=DeprecationWarning,
-            stacklevel=warn_stacklevel(),
-        )
-        options["path_prefix"] = options.pop("url_prefix")
-
     return NodeConfig(**options)  # type: ignore[arg-type]
 
 
@@ -343,8 +305,6 @@ def _rewrite_parameters(
     def wrapper(api: F) -> F:
         @wraps(api)
         def wrapped(*args: Any, **kwargs: Any) -> Any:
-            nonlocal api, body_name, body_fields
-
             # Let's give a nicer error message when users pass positional arguments.
             if len(args) >= 2:
                 raise TypeError(
diff -pruN 8.17.2-2/elasticsearch/_sync/client/watcher.py 9.2.0-1/elasticsearch/_sync/client/watcher.py
--- 8.17.2-2/elasticsearch/_sync/client/watcher.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/watcher.py	2025-10-28 16:50:53.000000000 +0000
@@ -45,10 +45,11 @@ class WatcherClient(NamespacedClient):
           <p>IMPORTANT: If the specified watch is currently being executed, this API will return an error
           The reason for this behavior is to prevent overwriting the watch status from a watch execution.</p>
           <p>Acknowledging an action throttles further executions of that action until its <code>ack.state</code> is reset to <code>awaits_successful_execution</code>.
-          This happens when the condition of the watch is not met (the condition evaluates to false).</p>
+          This happens when the condition of the watch is not met (the condition evaluates to false).
+          To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to the external documentation.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-ack-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch>`_
 
         :param watch_id: The watch identifier.
         :param action_id: A comma-separated list of the action identifiers to acknowledge.
@@ -104,7 +105,7 @@ class WatcherClient(NamespacedClient):
           A watch can be either active or inactive.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-activate-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch>`_
 
         :param watch_id: The watch identifier.
         """
@@ -148,7 +149,7 @@ class WatcherClient(NamespacedClient):
           A watch can be either active or inactive.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-deactivate-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch>`_
 
         :param watch_id: The watch identifier.
         """
@@ -196,7 +197,7 @@ class WatcherClient(NamespacedClient):
           When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the <code>.watches</code> index.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-delete-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch>`_
 
         :param id: The watch identifier.
         """
@@ -274,10 +275,11 @@ class WatcherClient(NamespacedClient):
           This serves as great tool for testing and debugging your watches prior to adding them to Watcher.</p>
           <p>When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.
           If your user is allowed to read index <code>a</code>, but not index <code>b</code>, then the exact same set of rules will apply during execution of a watch.</p>
-          <p>When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.</p>
+          <p>When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
+          Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-execute-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch>`_
 
         :param id: The watch identifier.
         :param action_modes: Determines how to handle the watch actions as part of the
@@ -365,7 +367,7 @@ class WatcherClient(NamespacedClient):
           Only a subset of settings are shown, for example <code>index.auto_expand_replicas</code> and <code>index.number_of_replicas</code>.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-get-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
@@ -410,7 +412,7 @@ class WatcherClient(NamespacedClient):
           <p>Get a watch.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-get-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch>`_
 
         :param id: The watch identifier.
         """
@@ -485,7 +487,7 @@ class WatcherClient(NamespacedClient):
           If the user is able to read index <code>a</code>, but not index <code>b</code>, the same will apply when the watch runs.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-put-watch.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch>`_
 
         :param id: The identifier for the watch.
         :param actions: The list of actions that will be run if the condition matches.
@@ -550,11 +552,7 @@ class WatcherClient(NamespacedClient):
                 __body["transform"] = transform
             if trigger is not None:
                 __body["trigger"] = trigger
-        if not __body:
-            __body = None  # type: ignore[assignment]
-        __headers = {"accept": "application/json"}
-        if __body is not None:
-            __headers["content-type"] = "application/json"
+        __headers = {"accept": "application/json", "content-type": "application/json"}
         return self.perform_request(  # type: ignore[return-value]
             "PUT",
             __path,
@@ -579,7 +577,7 @@ class WatcherClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
         query: t.Optional[t.Mapping[str, t.Any]] = None,
         search_after: t.Optional[
-            t.Sequence[t.Union[None, bool, float, int, str, t.Any]]
+            t.Sequence[t.Union[None, bool, float, int, str]]
         ] = None,
         size: t.Optional[int] = None,
         sort: t.Optional[
@@ -598,7 +596,7 @@ class WatcherClient(NamespacedClient):
           <p>Note that only the <code>_id</code> and <code>metadata.*</code> fields are queryable or sortable.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-query-watches.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches>`_
 
         :param from_: The offset from the first result to fetch. It must be non-negative.
         :param query: A query that filters the watches to be returned.
@@ -663,6 +661,7 @@ class WatcherClient(NamespacedClient):
         error_trace: t.Optional[bool] = None,
         filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
         human: t.Optional[bool] = None,
+        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
@@ -672,7 +671,9 @@ class WatcherClient(NamespacedClient):
           Start the Watcher service if it is not already running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-start.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start>`_
+
+        :param master_timeout: Period to wait for a connection to the master node.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_watcher/_start"
@@ -683,6 +684,8 @@ class WatcherClient(NamespacedClient):
             __query["filter_path"] = filter_path
         if human is not None:
             __query["human"] = human
+        if master_timeout is not None:
+            __query["master_timeout"] = master_timeout
         if pretty is not None:
             __query["pretty"] = pretty
         __headers = {"accept": "application/json"}
@@ -734,7 +737,7 @@ class WatcherClient(NamespacedClient):
           You retrieve more metrics by using the metric parameter.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stats.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats>`_
 
         :param metric: Defines which additional metrics are included in the response.
         :param emit_stacktraces: Defines whether stack traces are generated for each
@@ -785,7 +788,7 @@ class WatcherClient(NamespacedClient):
           Stop the Watcher service if it is running.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-stop.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop>`_
 
         :param master_timeout: The period to wait for the master node. If the master
             node is not available before the timeout expires, the request fails and returns
@@ -840,10 +843,13 @@ class WatcherClient(NamespacedClient):
           <p>Update Watcher index settings.
           Update settings for the Watcher internal index (<code>.watches</code>).
           Only a subset of settings can be modified.
-          This includes <code>index.auto_expand_replicas</code> and <code>index.number_of_replicas</code>.</p>
+          This includes <code>index.auto_expand_replicas</code>, <code>index.number_of_replicas</code>, <code>index.routing.allocation.exclude.*</code>,
+          <code>index.routing.allocation.include.*</code> and <code>index.routing.allocation.require.*</code>.
+          Modification of <code>index.routing.allocation.include._tier_preference</code> is an exception and is not allowed as the
+          Watcher shards must always be in the <code>data_content</code> tier.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/watcher-api-update-settings.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings>`_
 
         :param index_auto_expand_replicas:
         :param index_number_of_replicas:
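
Two call-site implications of the watcher changes above, sketched with placeholder values: start() now accepts master_timeout, and update_settings() takes the replica settings listed in its docstring:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder

# start() gained a master_timeout parameter in this release.
client.watcher.start(master_timeout="30s")

# Only a subset of .watches index settings may be updated; number_of_replicas is one of them.
client.watcher.update_settings(index_number_of_replicas=1)
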
diff -pruN 8.17.2-2/elasticsearch/_sync/client/xpack.py 9.2.0-1/elasticsearch/_sync/client/xpack.py
--- 8.17.2-2/elasticsearch/_sync/client/xpack.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_sync/client/xpack.py	2025-10-28 16:50:53.000000000 +0000
@@ -54,7 +54,7 @@ class XPackClient(NamespacedClient):
           </ul>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/info-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info>`_
 
         :param accept_enterprise: If this param is used it must be set to true
         :param categories: A comma-separated list of the information categories to include
@@ -103,7 +103,7 @@ class XPackClient(NamespacedClient):
           The API also provides some usage statistics.</p>
 
 
-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/usage-api.html>`_
+        `<https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack>`_
 
         :param master_timeout: The period to wait for a connection to the master node.
             If no response is received before the timeout expires, the request fails
diff -pruN 8.17.2-2/elasticsearch/_version.py 9.2.0-1/elasticsearch/_version.py
--- 8.17.2-2/elasticsearch/_version.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/_version.py	2025-10-28 16:50:53.000000000 +0000
@@ -15,4 +15,5 @@
 #  specific language governing permissions and limitations
 #  under the License.
 
-__versionstr__ = "8.17.2"
+__versionstr__ = "9.2.0"
+__es_specification_commit__ = "2f74c26e0a1d66c42232ce2830652c01e8717f00"
diff -pruN 8.17.2-2/elasticsearch/client.py 9.2.0-1/elasticsearch/client.py
--- 8.17.2-2/elasticsearch/client.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/client.py	2025-10-28 16:50:53.000000000 +0000
@@ -47,6 +47,7 @@ from ._sync.client.migration import Migr
 from ._sync.client.ml import MlClient as MlClient  # noqa: F401
 from ._sync.client.monitoring import MonitoringClient as MonitoringClient  # noqa: F401
 from ._sync.client.nodes import NodesClient as NodesClient  # noqa: F401
+from ._sync.client.project import ProjectClient as ProjectClient  # noqa: F401
 from ._sync.client.query_rules import QueryRulesClient as QueryRulesClient  # noqa: F401
 from ._sync.client.rollup import RollupClient as RollupClient  # noqa: F401
 from ._sync.client.search_application import (  # noqa: F401
@@ -62,6 +63,7 @@ from ._sync.client.slm import SlmClient
 from ._sync.client.snapshot import SnapshotClient as SnapshotClient  # noqa: F401
 from ._sync.client.sql import SqlClient as SqlClient  # noqa: F401
 from ._sync.client.ssl import SslClient as SslClient  # noqa: F401
+from ._sync.client.streams import StreamsClient as StreamsClient  # noqa: F401
 from ._sync.client.synonyms import SynonymsClient as SynonymsClient  # noqa: F401
 from ._sync.client.tasks import TasksClient as TasksClient  # noqa: F401
 from ._sync.client.text_structure import (  # noqa: F401
@@ -105,6 +107,7 @@ __all__ = [
     "MlClient",
     "MonitoringClient",
     "NodesClient",
+    "ProjectClient",
     "RollupClient",
     "SearchApplicationClient",
     "SearchableSnapshotsClient",
@@ -115,6 +118,7 @@ __all__ = [
     "SnapshotClient",
     "SqlClient",
     "SslClient",
+    "StreamsClient",
     "TasksClient",
     "TextStructureClient",
     "TransformClient",
diff -pruN 8.17.2-2/elasticsearch/compat.py 9.2.0-1/elasticsearch/compat.py
--- 8.17.2-2/elasticsearch/compat.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/compat.py	2025-10-28 16:50:53.000000000 +0000
@@ -16,12 +16,17 @@
 #  under the License.
 
 import inspect
+import os
 import sys
+from contextlib import contextmanager
 from pathlib import Path
-from typing import Tuple, Type, Union
+from threading import Thread
+from typing import Any, Callable, Iterator, Tuple, Type, Union
 
 string_types: Tuple[Type[str], Type[bytes]] = (str, bytes)
 
+DISABLE_WARN_STACKLEVEL_ENV_VAR = "DISABLE_WARN_STACKLEVEL"
+
 
 def to_str(x: Union[str, bytes], encoding: str = "ascii") -> str:
     if not isinstance(x, str):
@@ -37,6 +42,8 @@ def to_bytes(x: Union[str, bytes], encod
 
 def warn_stacklevel() -> int:
     """Dynamically determine warning stacklevel for warnings based on the call stack"""
+    if os.environ.get(DISABLE_WARN_STACKLEVEL_ENV_VAR) in ["1", "true", "True"]:
+        return 0
     try:
         # Grab the root module from the current module '__name__'
         module_name = __name__.partition(".")[0]
@@ -71,9 +78,36 @@ def warn_stacklevel() -> int:
     return 0
 
 
+@contextmanager
+def safe_thread(
+    target: Callable[..., Any], *args: Any, **kwargs: Any
+) -> Iterator[Thread]:
+    """Run a thread within a context manager block.
+
+    The thread is automatically joined when the block ends. If the thread raised
+    an exception, it is raised in the caller's context.
+    """
+    captured_exception = None
+
+    def run() -> None:
+        try:
+            target(*args, **kwargs)
+        except BaseException as exc:
+            nonlocal captured_exception
+            captured_exception = exc
+
+    thread = Thread(target=run)
+    thread.start()
+    yield thread
+    thread.join()
+    if captured_exception:
+        raise captured_exception
+
+
 __all__ = [
     "string_types",
     "to_str",
     "to_bytes",
     "warn_stacklevel",
+    "safe_thread",
 ]
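
The new safe_thread helper added above is a context manager that starts the target in a thread, joins it when the block exits, and re-raises any exception from the target in the caller. A small self-contained sketch; the worker function is illustrative:

from elasticsearch.compat import safe_thread

def worker(n: int) -> None:
    # Exceptions raised here surface in the caller once the with-block ends.
    total = sum(range(n))
    print(f"sum of 0..{n - 1} = {total}")

with safe_thread(worker, 1_000_000) as thread:
    # The thread is already started; it is joined when the with-block ends.
    print(f"worker running in {thread.name}")
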
diff -pruN 8.17.2-2/elasticsearch/dsl/__init__.py 9.2.0-1/elasticsearch/dsl/__init__.py
--- 8.17.2-2/elasticsearch/dsl/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,232 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from . import async_connections, connections
+from .aggs import A, Agg
+from .analysis import analyzer, char_filter, normalizer, token_filter, tokenizer
+from .document import AsyncDocument, Document
+from .document_base import E, InnerDoc, M, MetaField, mapped_field
+from .exceptions import (
+    ElasticsearchDslException,
+    IllegalOperation,
+    UnknownDslObject,
+    ValidationException,
+)
+from .faceted_search import (
+    AsyncFacetedSearch,
+    DateHistogramFacet,
+    Facet,
+    FacetedResponse,
+    FacetedSearch,
+    HistogramFacet,
+    NestedFacet,
+    RangeFacet,
+    TermsFacet,
+)
+from .field import (
+    AggregateMetricDouble,
+    Alias,
+    Binary,
+    Boolean,
+    Byte,
+    Completion,
+    ConstantKeyword,
+    CountedKeyword,
+    CustomField,
+    Date,
+    DateNanos,
+    DateRange,
+    DenseVector,
+    Double,
+    DoubleRange,
+    Field,
+    Flattened,
+    Float,
+    FloatRange,
+    GeoPoint,
+    GeoShape,
+    HalfFloat,
+    Histogram,
+    IcuCollationKeyword,
+    Integer,
+    IntegerRange,
+    Ip,
+    IpRange,
+    Join,
+    Keyword,
+    Long,
+    LongRange,
+    MatchOnlyText,
+    Murmur3,
+    Nested,
+    Object,
+    Passthrough,
+    Percolator,
+    Point,
+    RangeField,
+    RankFeature,
+    RankFeatures,
+    RankVectors,
+    ScaledFloat,
+    SearchAsYouType,
+    SemanticText,
+    Shape,
+    Short,
+    SparseVector,
+    Text,
+    TokenCount,
+    UnsignedLong,
+    Version,
+    Wildcard,
+    construct_field,
+)
+from .function import SF
+from .index import (
+    AsyncComposableIndexTemplate,
+    AsyncIndex,
+    AsyncIndexTemplate,
+    ComposableIndexTemplate,
+    Index,
+    IndexTemplate,
+)
+from .mapping import AsyncMapping, Mapping
+from .query import Q, Query
+from .response import AggResponse, Response, UpdateByQueryResponse
+from .search import (
+    AsyncEmptySearch,
+    AsyncMultiSearch,
+    AsyncSearch,
+    EmptySearch,
+    MultiSearch,
+    Search,
+)
+from .update_by_query import AsyncUpdateByQuery, UpdateByQuery
+from .utils import AttrDict, AttrList, DslBase
+from .wrappers import Range
+
+__all__ = [
+    "A",
+    "Agg",
+    "AggResponse",
+    "AggregateMetricDouble",
+    "Alias",
+    "AsyncComposableIndexTemplate",
+    "AsyncDocument",
+    "AsyncEmptySearch",
+    "AsyncFacetedSearch",
+    "AsyncIndex",
+    "AsyncIndexTemplate",
+    "AsyncMapping",
+    "AsyncMultiSearch",
+    "AsyncSearch",
+    "AsyncUpdateByQuery",
+    "AttrDict",
+    "AttrList",
+    "Binary",
+    "Boolean",
+    "Byte",
+    "Completion",
+    "ComposableIndexTemplate",
+    "ConstantKeyword",
+    "CountedKeyword",
+    "CustomField",
+    "Date",
+    "DateHistogramFacet",
+    "DateNanos",
+    "DateRange",
+    "DenseVector",
+    "Document",
+    "Double",
+    "DoubleRange",
+    "DslBase",
+    "E",
+    "ElasticsearchDslException",
+    "EmptySearch",
+    "Facet",
+    "FacetedResponse",
+    "FacetedSearch",
+    "Field",
+    "Flattened",
+    "Float",
+    "FloatRange",
+    "GeoPoint",
+    "GeoShape",
+    "HalfFloat",
+    "Histogram",
+    "HistogramFacet",
+    "IcuCollationKeyword",
+    "IllegalOperation",
+    "Index",
+    "IndexTemplate",
+    "InnerDoc",
+    "Integer",
+    "IntegerRange",
+    "Ip",
+    "IpRange",
+    "Join",
+    "Keyword",
+    "Long",
+    "LongRange",
+    "M",
+    "Mapping",
+    "MatchOnlyText",
+    "MetaField",
+    "MultiSearch",
+    "Murmur3",
+    "Nested",
+    "NestedFacet",
+    "Object",
+    "Passthrough",
+    "Percolator",
+    "Point",
+    "Q",
+    "Query",
+    "Range",
+    "RangeFacet",
+    "RangeField",
+    "RankFeature",
+    "RankFeatures",
+    "RankVectors",
+    "Response",
+    "SF",
+    "ScaledFloat",
+    "Search",
+    "SearchAsYouType",
+    "SemanticText",
+    "Shape",
+    "Short",
+    "SparseVector",
+    "TermsFacet",
+    "Text",
+    "TokenCount",
+    "UnknownDslObject",
+    "UnsignedLong",
+    "UpdateByQuery",
+    "UpdateByQueryResponse",
+    "ValidationException",
+    "Version",
+    "Wildcard",
+    "analyzer",
+    "async_connections",
+    "char_filter",
+    "connections",
+    "construct_field",
+    "mapped_field",
+    "normalizer",
+    "token_filter",
+    "tokenizer",
+]
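
The dsl package added above bundles the DSL layer (formerly the separate elasticsearch-dsl package) inside the client. A hedged end-to-end sketch using a few of the exported names; the connection settings, index name, and field choices are placeholders:

from elasticsearch.dsl import Document, Keyword, Text, connections

connections.create_connection(hosts="https://localhost:9200", api_key="...")  # placeholder

class Article(Document):
    title = Text()
    category = Keyword()

    class Index:
        name = "articles"

Article.init()  # create the index and populate the mappings in elasticsearch
Article(title="Hello dsl", category="news").save()
resp = Article.search().query("match", title="hello").execute()
for hit in resp:
    print(hit.meta.id, hit.title)
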
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/__init__.py 9.2.0-1/elasticsearch/dsl/_async/__init__.py
--- 8.17.2-2/elasticsearch/dsl/_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/document.py 9.2.0-1/elasticsearch/dsl/_async/document.py
--- 8.17.2-2/elasticsearch/dsl/_async/document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,607 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncIterable,
+    AsyncIterator,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+    Union,
+    cast,
+)
+
+from typing_extensions import Self, dataclass_transform
+
+from elasticsearch.exceptions import NotFoundError, RequestError
+from elasticsearch.helpers import async_bulk
+
+from .._async.index import AsyncIndex
+from ..async_connections import get_connection
+from ..document_base import DocumentBase, DocumentMeta, mapped_field
+from ..exceptions import IllegalOperation
+from ..utils import DOC_META_FIELDS, META_FIELDS, AsyncUsingType, merge
+from .search import AsyncSearch
+
+if TYPE_CHECKING:
+    from elasticsearch import AsyncElasticsearch
+    from elasticsearch.esql.esql import ESQLBase
+
+
+class AsyncIndexMeta(DocumentMeta):
+    _index: AsyncIndex
+
+    # global flag to guard us from associating an Index with the base Document
+    # class, only user defined subclasses should have an _index attr
+    _document_initialized = False
+
+    def __new__(
+        cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]
+    ) -> "AsyncIndexMeta":
+        new_cls = super().__new__(cls, name, bases, attrs)
+        if cls._document_initialized:
+            index_opts = attrs.pop("Index", None)
+            index = cls.construct_index(index_opts, bases)
+            new_cls._index = index
+            index.document(new_cls)
+        cls._document_initialized = True
+        return cast(AsyncIndexMeta, new_cls)
+
+    @classmethod
+    def construct_index(
+        cls, opts: Dict[str, Any], bases: Tuple[type, ...]
+    ) -> AsyncIndex:
+        if opts is None:
+            for b in bases:
+                if hasattr(b, "_index"):
+                    return b._index
+
+            # Set None as Index name so it will set _all while making the query
+            return AsyncIndex(name=None)
+
+        i = AsyncIndex(
+            getattr(opts, "name", "*"), using=getattr(opts, "using", "default")
+        )
+        i.settings(**getattr(opts, "settings", {}))
+        i.aliases(**getattr(opts, "aliases", {}))
+        for a in getattr(opts, "analyzers", ()):
+            i.analyzer(a)
+        return i
+
+
+@dataclass_transform(field_specifiers=(mapped_field,))
+class AsyncDocument(DocumentBase, metaclass=AsyncIndexMeta):
+    """
+    Model-like class for persisting documents in elasticsearch.
+    """
+
+    if TYPE_CHECKING:
+        _index: AsyncIndex
+
+    @classmethod
+    def _get_using(cls, using: Optional[AsyncUsingType] = None) -> AsyncUsingType:
+        return using or cls._index._using
+
+    @classmethod
+    def _get_connection(
+        cls, using: Optional[AsyncUsingType] = None
+    ) -> "AsyncElasticsearch":
+        return get_connection(cls._get_using(using))
+
+    @classmethod
+    async def init(
+        cls, index: Optional[str] = None, using: Optional[AsyncUsingType] = None
+    ) -> None:
+        """
+        Create the index and populate the mappings in elasticsearch.
+        """
+        i = cls._index
+        if index:
+            i = i.clone(name=index)
+        await i.save(using=using)
+
+    @classmethod
+    def search(
+        cls, using: Optional[AsyncUsingType] = None, index: Optional[str] = None
+    ) -> AsyncSearch[Self]:
+        """
+        Create an :class:`~elasticsearch.dsl.Search` instance that will search
+        over this ``Document``.
+        """
+        s = AsyncSearch[Self](
+            using=cls._get_using(using), index=cls._default_index(index), doc_type=[cls]
+        )
+        return s.source(exclude_vectors=False)
+
+    @classmethod
+    async def get(
+        cls,
+        id: str,
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Optional[Self]:
+        """
+        Retrieve a single document from elasticsearch using its ``id``.
+
+        :arg id: ``id`` of the document to be retrieved
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.get`` unchanged.
+        """
+        es = cls._get_connection(using)
+        doc = await es.get(index=cls._default_index(index), id=id, **kwargs)
+        if not doc.get("found", False):
+            return None
+        return cls.from_es(doc)
+
+    @classmethod
+    async def exists(
+        cls,
+        id: str,
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        **kwargs: Any,
+    ) -> bool:
+        """
+        Check if a single document exists in elasticsearch using its ``id``.
+
+        :arg id: ``id`` of the document to check if exists
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.exists`` unchanged.
+        """
+        es = cls._get_connection(using)
+        return bool(await es.exists(index=cls._default_index(index), id=id, **kwargs))
+
+    @classmethod
+    async def mget(
+        cls,
+        docs: List[Dict[str, Any]],
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        raise_on_error: bool = True,
+        missing: str = "none",
+        **kwargs: Any,
+    ) -> List[Optional[Self]]:
+        r"""
+        Retrieve multiple documents by their ``id``\s. Returns a list of instances
+        in the same order as requested.
+
+        :arg docs: list of ``id``\s of the documents to be retrieved or a list
+            of document specifications as per
+            https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg missing: what to do when one of the documents requested is not
+            found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise
+            ``NotFoundError``) or ``'skip'`` (ignore the missing document).
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.mget`` unchanged.
+        """
+        if missing not in ("raise", "skip", "none"):
+            raise ValueError("'missing' must be 'raise', 'skip', or 'none'.")
+        es = cls._get_connection(using)
+        body = {
+            "docs": [
+                doc if isinstance(doc, collections.abc.Mapping) else {"_id": doc}
+                for doc in docs
+            ]
+        }
+        results = await es.mget(index=cls._default_index(index), body=body, **kwargs)
+
+        objs: List[Optional[Self]] = []
+        error_docs: List[Self] = []
+        missing_docs: List[Self] = []
+        for doc in results["docs"]:
+            if doc.get("found"):
+                if error_docs or missing_docs:
+                    # We're going to raise an exception anyway, so avoid an
+                    # expensive call to cls.from_es().
+                    continue
+
+                objs.append(cls.from_es(doc))
+
+            elif doc.get("error"):
+                if raise_on_error:
+                    error_docs.append(doc)
+                if missing == "none":
+                    objs.append(None)
+
+            # The doc didn't cause an error, but the doc also wasn't found.
+            elif missing == "raise":
+                missing_docs.append(doc)
+            elif missing == "none":
+                objs.append(None)
+
+        if error_docs:
+            error_ids = [doc["_id"] for doc in error_docs]
+            message = "Required routing not provided for documents %s."
+            message %= ", ".join(error_ids)
+            raise RequestError(400, message, error_docs)  # type: ignore[arg-type]
+        if missing_docs:
+            missing_ids = [doc["_id"] for doc in missing_docs]
+            message = f"Documents {', '.join(missing_ids)} not found."
+            raise NotFoundError(404, message, {"docs": missing_docs})  # type: ignore[arg-type]
+        return objs
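+
+    # Illustrative usage sketch (not part of the library): fetching several
+    # documents of an assumed Post subclass at once; results come back in the
+    # requested order, and with the defaults a missing document appears as None:
+    #
+    #     posts = await Post.mget(["1", "2", "no-such-id"])
+    #     found = [p for p in posts if p is not None]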
+
+    async def delete(
+        self,
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        **kwargs: Any,
+    ) -> None:
+        """
+        Delete the instance in elasticsearch.
+
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.delete`` unchanged.
+        """
+        es = self._get_connection(using)
+        # extract routing etc from meta
+        doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        # Optimistic concurrency control
+        if "seq_no" in self.meta and "primary_term" in self.meta:
+            doc_meta["if_seq_no"] = self.meta["seq_no"]
+            doc_meta["if_primary_term"] = self.meta["primary_term"]
+
+        doc_meta.update(kwargs)
+        i = self._get_index(index)
+        assert i is not None
+
+        await es.delete(index=i, **doc_meta)
+
+    async def update(
+        self,
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        detect_noop: bool = True,
+        doc_as_upsert: bool = False,
+        refresh: bool = False,
+        retry_on_conflict: Optional[int] = None,
+        script: Optional[Union[str, Dict[str, Any]]] = None,
+        script_id: Optional[str] = None,
+        scripted_upsert: bool = False,
+        upsert: Optional[Dict[str, Any]] = None,
+        return_doc_meta: bool = False,
+        **fields: Any,
+    ) -> Any:
+        """
+        Partial update of the document: specify the fields you wish to update
+        and both the instance and the document in elasticsearch will be
+        updated::
+
+            doc = MyDocument(title='Document Title!')
+            await doc.save()
+            await doc.update(title='New Document Title!')
+
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg detect_noop: Set to ``False`` to disable noop detection.
+        :arg refresh: Control when the changes made by this request are visible
+            to search. Set to ``True`` for immediate effect.
+        :arg retry_on_conflict: In between the get and indexing phases of the
+            update, it is possible that another process might have already
+            updated the same document. By default, the update will fail with a
+            version conflict exception. The retry_on_conflict parameter
+            controls how many times to retry the update before finally throwing
+            an exception.
+        :arg doc_as_upsert:  Instead of sending a partial doc plus an upsert
+            doc, setting doc_as_upsert to true will use the contents of doc as
+            the upsert value
+        :arg script: the source code of the script as a string, or a dictionary
+            with script attributes to update.
+        :arg return_doc_meta: set to ``True`` to return all metadata from the
+            index API call instead of only the operation result
+
+        :return: operation result noop/updated
+        """
+        body: Dict[str, Any] = {
+            "doc_as_upsert": doc_as_upsert,
+            "detect_noop": detect_noop,
+        }
+
+        # scripted update
+        if script or script_id:
+            if upsert is not None:
+                body["upsert"] = upsert
+
+            if script:
+                if isinstance(script, str):
+                    script = {"source": script}
+            else:
+                script = {"id": script_id}
+
+            if "params" not in script:
+                script["params"] = fields
+            else:
+                script["params"].update(fields)
+
+            body["script"] = script
+            body["scripted_upsert"] = scripted_upsert
+
+        # partial document update
+        else:
+            if not fields:
+                raise IllegalOperation(
+                    "You cannot call update() without updating individual fields or a script. "
+                    "If you wish to update the entire object use save()."
+                )
+
+            # update given fields locally
+            merge(self, fields)
+
+            # prepare data for ES
+            values = self.to_dict(skip_empty=False)
+
+            # if fields were given: partial update
+            body["doc"] = {k: values.get(k) for k in fields.keys()}
+
+        # extract routing etc from meta
+        doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        if retry_on_conflict is not None:
+            doc_meta["retry_on_conflict"] = retry_on_conflict
+
+        # Optimistic concurrency control
+        if (
+            retry_on_conflict in (None, 0)
+            and "seq_no" in self.meta
+            and "primary_term" in self.meta
+        ):
+            doc_meta["if_seq_no"] = self.meta["seq_no"]
+            doc_meta["if_primary_term"] = self.meta["primary_term"]
+
+        i = self._get_index(index)
+        assert i is not None
+
+        meta = await self._get_connection(using).update(
+            index=i, body=body, refresh=refresh, **doc_meta
+        )
+
+        # update meta information from ES
+        for k in META_FIELDS:
+            if "_" + k in meta:
+                setattr(self.meta, k, meta["_" + k])
+
+        return meta if return_doc_meta else meta["result"]
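+
+    # Illustrative usage sketch (not part of the library): a partial update of
+    # named fields, and a scripted update with parameters; the field names and
+    # the painless script are made up for the example:
+    #
+    #     await post.update(title="New title", refresh=True)
+    #     await post.update(
+    #         script="ctx._source.views += params.n", n=1, retry_on_conflict=3
+    #     )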
+
+    async def save(
+        self,
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        validate: bool = True,
+        skip_empty: bool = True,
+        return_doc_meta: bool = False,
+        **kwargs: Any,
+    ) -> Any:
+        """
+        Save the document into elasticsearch. If the document doesn't exist it
+        is created, otherwise it is overwritten. The operation result indicates
+        whether a new document was created.
+
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg validate: set to ``False`` to skip validating the document
+        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
+            ``[]``, ``{}``) to be left on the document. Those values will be
+            stripped out otherwise as they make no difference in elasticsearch.
+        :arg return_doc_meta: set to ``True`` to return all metadata from the
+            update API call instead of only the operation result
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.index`` unchanged.
+
+        :return: operation result created/updated
+        """
+        if validate:
+            self.full_clean()
+
+        es = self._get_connection(using)
+        # extract routing etc from meta
+        doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        # Optimistic concurrency control
+        if "seq_no" in self.meta and "primary_term" in self.meta:
+            doc_meta["if_seq_no"] = self.meta["seq_no"]
+            doc_meta["if_primary_term"] = self.meta["primary_term"]
+
+        doc_meta.update(kwargs)
+        i = self._get_index(index)
+        assert i is not None
+
+        meta = await es.index(
+            index=i,
+            body=self.to_dict(skip_empty=skip_empty),
+            **doc_meta,
+        )
+        # update meta information from ES
+        for k in META_FIELDS:
+            if "_" + k in meta:
+                setattr(self.meta, k, meta["_" + k])
+
+        return meta if return_doc_meta else meta["result"]
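+
+    # Illustrative usage sketch (not part of the library): saving an instance
+    # of an assumed Post subclass; a later save of the same instance reuses the
+    # stored seq_no/primary_term for optimistic concurrency control:
+    #
+    #     post = Post(title="Hello", meta={"id": "42"})
+    #     result = await post.save(refresh=True)   # 'created' or 'updated'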
+
+    @classmethod
+    async def bulk(
+        cls,
+        actions: AsyncIterable[Union[Self, Dict[str, Any]]],
+        using: Optional[AsyncUsingType] = None,
+        index: Optional[str] = None,
+        validate: bool = True,
+        skip_empty: bool = True,
+        **kwargs: Any,
+    ) -> Tuple[int, Union[int, List[Any]]]:
+        """
+        Perform multiple indexing operations in a single request.
+
+        :arg actions: an async iterable yielding document instances to be
+            indexed or bulk operation dictionaries.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg index: Elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg validate: set to ``False`` to skip validating the documents
+        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
+            ``[]``, ``{}``) to be left on the document. Those values will be
+            stripped out otherwise as they make no difference in Elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.bulk`` unchanged.
+
+        :return: bulk operation results
+        """
+        es = cls._get_connection(using)
+
+        i = cls._default_index(index)
+        assert i is not None
+
+        class Generate:
+            def __init__(
+                self,
+                doc_iterator: AsyncIterable[Union[AsyncDocument, Dict[str, Any]]],
+            ):
+                self.doc_iterator = doc_iterator.__aiter__()
+
+            def __aiter__(self) -> Self:
+                return self
+
+            async def __anext__(self) -> Dict[str, Any]:
+                doc: Optional[Union[AsyncDocument, Dict[str, Any]]] = (
+                    await self.doc_iterator.__anext__()
+                )
+
+                if isinstance(doc, dict):
+                    action = doc
+                    doc = None
+                    if "_source" in action and isinstance(
+                        action["_source"], AsyncDocument
+                    ):
+                        doc = action["_source"]
+                        if validate:  # pragma: no cover
+                            doc.full_clean()
+                        action["_source"] = doc.to_dict(
+                            include_meta=False, skip_empty=skip_empty
+                        )
+                elif doc is not None:
+                    if validate:  # pragma: no cover
+                        doc.full_clean()
+                    action = doc.to_dict(include_meta=True, skip_empty=skip_empty)
+                if "_index" not in action:
+                    action["_index"] = i
+                return action
+
+        return await async_bulk(es, Generate(actions), **kwargs)
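+
+    # Illustrative usage sketch (not part of the library): bulk-indexing from
+    # an async generator of instances of an assumed Post subclass:
+    #
+    #     async def generate_posts():
+    #         for i in range(100):
+    #             yield Post(title=f"post {i}", meta={"id": str(i)})
+    #
+    #     success, errors = await Post.bulk(generate_posts(), refresh=True)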
+
+    @classmethod
+    async def esql_execute(
+        cls,
+        query: "ESQLBase",
+        return_additional: bool = False,
+        ignore_missing_fields: bool = False,
+        using: Optional[AsyncUsingType] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[Union[Self, Tuple[Self, Dict[str, Any]]]]:
+        """
+        Execute the given ES|QL query and return an asynchronous iterator of
+        instances of this ``Document``, or, when ``return_additional`` is set
+        to ``True``, of 2-element tuples where the first element is a document
+        instance and the second a dictionary with any remaining columns
+        requested in the query.
+
+        :arg query: an ES|QL query object created with the ``esql_from()`` method.
+        :arg return_additional: if ``False`` (the default), this method returns
+            document objects. If set to ``True``, the method returns tuples with
+            a document in the first element and a dictionary with any additional
+            columns returned by the query in the second element.
+        :arg ignore_missing_fields: if ``False`` (the default), all the fields of
+            the document must be present in the query, or else an exception is
+            raised. Set to ``True`` to allow missing fields, which will result in
+            partially initialized document objects.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg kwargs: additional options for the ``client.esql.query()`` function.
+        """
+        es = cls._get_connection(using)
+        response = await es.esql.query(query=str(query), **kwargs)
+        query_columns = [col["name"] for col in response.body.get("columns", [])]
+
+        # Here we get the list of columns defined in the document, which are the
+        # columns that we will take from each result to assemble the document
+        # object.
+        # When `for_esql=False` is passed below by default, the list will include
+        # nested fields, which ES|QL does not return, causing an error. When passing
+        # `ignore_missing_fields=True` the list will be generated with
+        # `for_esql=True`, so the error will not occur, but the documents will
+        # not have any Nested objects in them.
+        doc_fields = set(cls._get_field_names(for_esql=ignore_missing_fields))
+        if not ignore_missing_fields and not doc_fields.issubset(set(query_columns)):
+            raise ValueError(
+                f"Not all fields of {cls.__name__} were returned by the query. "
+                "Make sure your document does not use Nested fields, which are "
+                "currently not supported in ES|QL. To force the query to be "
+                "evaluated in spite of the missing fields, pass set the "
+                "ignore_missing_fields=True option in the esql_execute() call."
+            )
+        non_doc_fields: set[str] = set(query_columns) - doc_fields - {"_id"}
+        index_id = query_columns.index("_id")
+
+        results = response.body.get("values", [])
+        for column_values in results:
+            # create a dictionary with all the document fields, expanding the
+            # dot notation returned by ES|QL into the recursive dictionaries
+            # used by Document.from_dict()
+            doc_dict: Dict[str, Any] = {}
+            for col, val in zip(query_columns, column_values):
+                if col in doc_fields:
+                    cols = col.split(".")
+                    d = doc_dict
+                    for c in cols[:-1]:
+                        if c not in d:
+                            d[c] = {}
+                        d = d[c]
+                    d[cols[-1]] = val
+
+            # create the document instance
+            obj = cls(meta={"_id": column_values[index_id]})
+            obj._from_dict(doc_dict)
+
+            if return_additional:
+                # build a dict with any other values included in the response
+                other = {
+                    col: val
+                    for col, val in zip(query_columns, column_values)
+                    if col in non_doc_fields
+                }
+
+                yield obj, other
+            else:
+                yield obj
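+
+    # Illustrative usage sketch (not part of the library): running an ES|QL
+    # query built with the esql_from() helper mentioned in the docstring above,
+    # for an assumed Post subclass, and iterating over the results:
+    #
+    #     query = Post.esql_from()
+    #     async for post in Post.esql_execute(query, return_additional=False):
+    #         print(post.meta.id)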
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/faceted_search.py 9.2.0-1/elasticsearch/dsl/_async/faceted_search.py
--- 8.17.2-2/elasticsearch/dsl/_async/faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,50 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING
+
+from ..faceted_search_base import FacetedResponse, FacetedSearchBase
+from ..utils import _R
+from .search import AsyncSearch
+
+if TYPE_CHECKING:
+    from ..response import Response
+
+
+class AsyncFacetedSearch(FacetedSearchBase[_R]):
+    _s: AsyncSearch[_R]
+
+    async def count(self) -> int:
+        return await self._s.count()
+
+    def search(self) -> AsyncSearch[_R]:
+        """
+        Returns the base Search object to which the facets are added.
+
+        You can customize the query by overriding this method and returning a
+        modified search object.
+        """
+        s = AsyncSearch[_R](doc_type=self.doc_types, index=self.index, using=self.using)
+        return s.response_class(FacetedResponse)
+
+    async def execute(self) -> "Response[_R]":
+        """
+        Execute the search and return the response.
+        """
+        r = await self._s.execute()
+        r._faceted_search = self
+        return r
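+
+# Illustrative usage sketch (not part of the library): a minimal faceted
+# search subclass; the Post document, its fields and the TermsFacet helper
+# are assumed from the rest of the DSL package:
+#
+#     class PostSearch(AsyncFacetedSearch):
+#         doc_types = [Post]
+#         fields = ["title", "body"]
+#         facets = {"tags": TermsFacet(field="tags")}
+#
+#     response = await PostSearch("query text").execute()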
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/index.py 9.2.0-1/elasticsearch/dsl/_async/index.py
--- 8.17.2-2/elasticsearch/dsl/_async/index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,639 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from typing_extensions import Self
+
+from ..async_connections import get_connection
+from ..exceptions import IllegalOperation
+from ..index_base import IndexBase
+from ..utils import AsyncUsingType
+from .mapping import AsyncMapping
+from .search import AsyncSearch
+from .update_by_query import AsyncUpdateByQuery
+
+if TYPE_CHECKING:
+    from elastic_transport import ObjectApiResponse
+
+    from elasticsearch import AsyncElasticsearch
+
+
+class AsyncIndexTemplate:
+    def __init__(
+        self,
+        name: str,
+        template: str,
+        index: Optional["AsyncIndex"] = None,
+        order: Optional[int] = None,
+        **kwargs: Any,
+    ):
+        if index is None:
+            self._index = AsyncIndex(template, **kwargs)
+        else:
+            if kwargs:
+                raise ValueError(
+                    "You cannot specify options for Index when"
+                    " passing an Index instance."
+                )
+            self._index = index.clone()
+            self._index._name = template
+        self._template_name = name
+        self.order = order
+
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._index, attr_name)
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = self._index.to_dict()
+        d["index_patterns"] = [self._index._name]
+        if self.order is not None:
+            d["order"] = self.order
+        return d
+
+    async def save(
+        self, using: Optional[AsyncUsingType] = None
+    ) -> "ObjectApiResponse[Any]":
+        es = get_connection(using or self._index._using)
+        return await es.indices.put_template(
+            name=self._template_name, body=self.to_dict()
+        )
+
+
+class AsyncComposableIndexTemplate:
+    def __init__(
+        self,
+        name: str,
+        template: str,
+        index: Optional["AsyncIndex"] = None,
+        priority: Optional[int] = None,
+        **kwargs: Any,
+    ):
+        if index is None:
+            self._index = AsyncIndex(template, **kwargs)
+        else:
+            if kwargs:
+                raise ValueError(
+                    "You cannot specify options for Index when"
+                    " passing an Index instance."
+                )
+            self._index = index.clone()
+            self._index._name = template
+        self._template_name = name
+        self.priority = priority
+
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._index, attr_name)
+
+    def to_dict(self) -> Dict[str, Any]:
+        d: Dict[str, Any] = {"template": self._index.to_dict()}
+        d["index_patterns"] = [self._index._name]
+        if self.priority is not None:
+            d["priority"] = self.priority
+        return d
+
+    async def save(
+        self, using: Optional[AsyncUsingType] = None
+    ) -> "ObjectApiResponse[Any]":
+        es = get_connection(using or self._index._using)
+        return await es.indices.put_index_template(
+            name=self._template_name, **self.to_dict()
+        )
+
+
+class AsyncIndex(IndexBase):
+    _using: AsyncUsingType
+
+    if TYPE_CHECKING:
+
+        def get_or_create_mapping(self) -> AsyncMapping: ...
+
+    def __init__(self, name: str, using: AsyncUsingType = "default"):
+        """
+        :arg name: name of the index
+        :arg using: connection alias to use, defaults to ``'default'``
+        """
+        super().__init__(name, AsyncMapping, using=using)
+
+    def _get_connection(
+        self, using: Optional[AsyncUsingType] = None
+    ) -> "AsyncElasticsearch":
+        if self._name is None:
+            raise ValueError("You cannot perform API calls on the default index.")
+        return get_connection(using or self._using)
+
+    connection = property(_get_connection)
+
+    def as_template(
+        self,
+        template_name: str,
+        pattern: Optional[str] = None,
+        order: Optional[int] = None,
+    ) -> AsyncIndexTemplate:
+        return AsyncIndexTemplate(
+            template_name, pattern or self._name, index=self, order=order
+        )
+
+    def as_composable_template(
+        self,
+        template_name: str,
+        pattern: Optional[str] = None,
+        priority: Optional[int] = None,
+    ) -> AsyncComposableIndexTemplate:
+        return AsyncComposableIndexTemplate(
+            template_name, pattern or self._name, index=self, priority=priority
+        )
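+
+    # Illustrative usage sketch (not part of the library): deriving a
+    # composable index template from an index definition and saving it;
+    # the index name, pattern and priority are made up:
+    #
+    #     tmpl = AsyncIndex("logs").as_composable_template(
+    #         "logs-template", pattern="logs-*", priority=100
+    #     )
+    #     await tmpl.save()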
+
+    async def load_mappings(self, using: Optional[AsyncUsingType] = None) -> None:
+        await self.get_or_create_mapping().update_from_es(
+            self._name, using=using or self._using
+        )
+
+    def clone(
+        self, name: Optional[str] = None, using: Optional[AsyncUsingType] = None
+    ) -> Self:
+        """
+        Create a copy of the instance with another name or connection alias.
+        Useful for creating multiple indices with shared configuration::
+
+            i = AsyncIndex('base-index')
+            i.settings(number_of_shards=1)
+            await i.create()
+
+            i2 = i.clone('other-index')
+            await i2.create()
+
+        :arg name: name of the index
+        :arg using: connection alias to use, defaults to ``'default'``
+        """
+        i = self.__class__(name or self._name, using=using or self._using)
+        i._settings = self._settings.copy()
+        i._aliases = self._aliases.copy()
+        i._analysis = self._analysis.copy()
+        i._doc_types = self._doc_types[:]
+        if self._mapping is not None:
+            i._mapping = self._mapping._clone()
+        return i
+
+    def search(self, using: Optional[AsyncUsingType] = None) -> AsyncSearch:
+        """
+        Return a :class:`~elasticsearch.dsl.Search` object searching over the
+        index (or all the indices belonging to this template) and its
+        ``Document``\\s.
+        """
+        return AsyncSearch(
+            using=using or self._using, index=self._name, doc_type=self._doc_types
+        )
+
+    def updateByQuery(
+        self, using: Optional[AsyncUsingType] = None
+    ) -> AsyncUpdateByQuery:
+        """
+        Return a :class:`~elasticsearch.dsl.UpdateByQuery` object searching over the index
+        (or all the indices belonging to this template) and updating Documents that match
+        the search criteria.
+
+        For more information, see here:
+        https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html
+        """
+        return AsyncUpdateByQuery(
+            using=using or self._using,
+            index=self._name,
+        )
+
+    async def create(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Creates the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.create`` unchanged.
+        """
+        return await self._get_connection(using).indices.create(
+            index=self._name, body=self.to_dict(), **kwargs
+        )
+
+    async def is_closed(self, using: Optional[AsyncUsingType] = None) -> bool:
+        state = await self._get_connection(using).cluster.state(
+            index=self._name, metric="metadata"
+        )
+        return bool(state["metadata"]["indices"][self._name]["state"] == "close")
+
+    async def save(
+        self, using: Optional[AsyncUsingType] = None
+    ) -> "Optional[ObjectApiResponse[Any]]":
+        """
+        Sync the index definition with elasticsearch, creating the index if it
+        doesn't exist and updating its settings and mappings if it does.
+
+        Note that some settings and mapping changes cannot be done on an open
+        index (or at all on an existing index), and for those this method will
+        fail with the underlying exception.
+        """
+        if not await self.exists(using=using):
+            return await self.create(using=using)
+
+        body = self.to_dict()
+        settings = body.pop("settings", {})
+        analysis = settings.pop("analysis", None)
+        current_settings = (await self.get_settings(using=using))[self._name][
+            "settings"
+        ]["index"]
+        if analysis:
+            if await self.is_closed(using=using):
+                # closed index, update away
+                settings["analysis"] = analysis
+            else:
+                # compare analysis definition, if all analysis objects are
+                # already defined as requested, skip analysis update and
+                # proceed, otherwise raise IllegalOperation
+                existing_analysis = current_settings.get("analysis", {})
+                if any(
+                    existing_analysis.get(section, {}).get(k, None)
+                    != analysis[section][k]
+                    for section in analysis
+                    for k in analysis[section]
+                ):
+                    raise IllegalOperation(
+                        "You cannot update analysis configuration on an open index, "
+                        "you need to close index %s first." % self._name
+                    )
+
+        # try and update the settings
+        if settings:
+            settings = settings.copy()
+            for k, v in list(settings.items()):
+                if k in current_settings and current_settings[k] == str(v):
+                    del settings[k]
+
+            if settings:
+                await self.put_settings(using=using, body=settings)
+
+        # update the mappings, any conflict in the mappings will result in an
+        # exception
+        mappings = body.pop("mappings", {})
+        if mappings:
+            return await self.put_mapping(using=using, body=mappings)
+
+        return None
+
+    async def analyze(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Perform the analysis process on a text and return the tokens breakdown
+        of the text.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.analyze`` unchanged.
+        """
+        return await self._get_connection(using).indices.analyze(
+            index=self._name, **kwargs
+        )
+
+    async def refresh(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Performs a refresh operation on the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.refresh`` unchanged.
+        """
+        return await self._get_connection(using).indices.refresh(
+            index=self._name, **kwargs
+        )
+
+    async def flush(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Performs a flush operation on the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.flush`` unchanged.
+        """
+        return await self._get_connection(using).indices.flush(
+            index=self._name, **kwargs
+        )
+
+    async def get(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The get index API retrieves information about the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get`` unchanged.
+        """
+        return await self._get_connection(using).indices.get(index=self._name, **kwargs)
+
+    async def open(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Opens the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.open`` unchanged.
+        """
+        return await self._get_connection(using).indices.open(
+            index=self._name, **kwargs
+        )
+
+    async def close(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Closes the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.close`` unchanged.
+        """
+        return await self._get_connection(using).indices.close(
+            index=self._name, **kwargs
+        )
+
+    async def delete(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Deletes the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.delete`` unchanged.
+        """
+        return await self._get_connection(using).indices.delete(
+            index=self._name, **kwargs
+        )
+
+    async def exists(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> bool:
+        """
+        Returns ``True`` if the index already exists in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.exists`` unchanged.
+        """
+        return bool(
+            await self._get_connection(using).indices.exists(index=self._name, **kwargs)
+        )
+
+    async def put_mapping(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Register specific mapping definition for a specific type.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.put_mapping`` unchanged.
+        """
+        return await self._get_connection(using).indices.put_mapping(
+            index=self._name, **kwargs
+        )
+
+    async def get_mapping(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve specific mapping definition for a specific type.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_mapping`` unchanged.
+        """
+        return await self._get_connection(using).indices.get_mapping(
+            index=self._name, **kwargs
+        )
+
+    async def get_field_mapping(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve mapping definition of a specific field.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_field_mapping`` unchanged.
+        """
+        return await self._get_connection(using).indices.get_field_mapping(
+            index=self._name, **kwargs
+        )
+
+    async def put_alias(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Create an alias for the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.put_alias`` unchanged.
+        """
+        return await self._get_connection(using).indices.put_alias(
+            index=self._name, **kwargs
+        )
+
+    async def exists_alias(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> bool:
+        """
+        Return a boolean indicating whether the given alias exists for this index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.exists_alias`` unchanged.
+        """
+        return bool(
+            await self._get_connection(using).indices.exists_alias(
+                index=self._name, **kwargs
+            )
+        )
+
+    async def get_alias(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve a specified alias.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_alias`` unchanged.
+        """
+        return await self._get_connection(using).indices.get_alias(
+            index=self._name, **kwargs
+        )
+
+    async def delete_alias(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Delete specific alias.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.delete_alias`` unchanged.
+        """
+        return await self._get_connection(using).indices.delete_alias(
+            index=self._name, **kwargs
+        )
+
+    async def get_settings(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve settings for the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_settings`` unchanged.
+        """
+        return await self._get_connection(using).indices.get_settings(
+            index=self._name, **kwargs
+        )
+
+    async def put_settings(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Change specific index level settings in real time.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.put_settings`` unchanged.
+        """
+        return await self._get_connection(using).indices.put_settings(
+            index=self._name, **kwargs
+        )
+
+    async def stats(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve statistics on different operations happening on the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.stats`` unchanged.
+        """
+        return await self._get_connection(using).indices.stats(
+            index=self._name, **kwargs
+        )
+
+    async def segments(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Provide low-level information about the segments that a Lucene index
+        (shard level) is built with.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.segments`` unchanged.
+        """
+        return await self._get_connection(using).indices.segments(
+            index=self._name, **kwargs
+        )
+
+    async def validate_query(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Validate a potentially expensive query without executing it.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.validate_query`` unchanged.
+        """
+        return await self._get_connection(using).indices.validate_query(
+            index=self._name, **kwargs
+        )
+
+    async def clear_cache(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Clear all caches or specific caches associated with the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.clear_cache`` unchanged.
+        """
+        return await self._get_connection(using).indices.clear_cache(
+            index=self._name, **kwargs
+        )
+
+    async def recovery(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The indices recovery API provides insight into on-going shard
+        recoveries for the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.recovery`` unchanged.
+        """
+        return await self._get_connection(using).indices.recovery(
+            index=self._name, **kwargs
+        )
+
+    async def shard_stores(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Provides store information for shard copies of the index. Store
+        information reports on which nodes shard copies exist, the shard copy
+        version, indicating how recent they are, and any exceptions encountered
+        while opening the shard index or from earlier engine failure.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.shard_stores`` unchanged.
+        """
+        return await self._get_connection(using).indices.shard_stores(
+            index=self._name, **kwargs
+        )
+
+    async def forcemerge(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The force merge API forces a merge of the index. The merge relates to
+        the number of segments a Lucene index holds within each shard. The
+        force merge operation reduces the number of segments by merging them.
+
+        This call will block until the merge is complete. If the http
+        connection is lost, the request will continue in the background, and
+        any new requests will block until the previous force merge is complete.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.forcemerge`` unchanged.
+        """
+        return await self._get_connection(using).indices.forcemerge(
+            index=self._name, **kwargs
+        )
+
+    async def shrink(
+        self, using: Optional[AsyncUsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The shrink index API allows you to shrink an existing index into a new
+        index with fewer primary shards. The number of primary shards in the
+        target index must be a factor of the shards in the source index. For
+        example, an index with 8 primary shards can be shrunk into 4, 2 or 1
+        primary shards, and an index with 15 primary shards can be shrunk into
+        5, 3 or 1. If the number of shards in the index is a prime number it can
+        only be shrunk into a single primary shard. Before shrinking, a
+        (primary or replica) copy of every shard in the index must be present
+        on the same node.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.shrink`` unchanged.
+        """
+        return await self._get_connection(using).indices.shrink(
+            index=self._name, **kwargs
+        )
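+
+# Illustrative usage sketch (not part of the library): creating and managing
+# an index; the index name and settings are made up:
+#
+#     i = AsyncIndex("blog")
+#     i.settings(number_of_shards=1, number_of_replicas=0)
+#     if not await i.exists():
+#         await i.create()
+#     await i.refresh()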
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/mapping.py 9.2.0-1/elasticsearch/dsl/_async/mapping.py
--- 8.17.2-2/elasticsearch/dsl/_async/mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,49 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import List, Optional, Union
+
+from typing_extensions import Self
+
+from ..async_connections import get_connection
+from ..mapping_base import MappingBase
+from ..utils import AsyncUsingType
+
+
+class AsyncMapping(MappingBase):
+    @classmethod
+    async def from_es(
+        cls, index: Optional[Union[str, List[str]]], using: AsyncUsingType = "default"
+    ) -> Self:
+        m = cls()
+        await m.update_from_es(index, using)
+        return m
+
+    async def update_from_es(
+        self, index: Optional[Union[str, List[str]]], using: AsyncUsingType = "default"
+    ) -> None:
+        es = get_connection(using)
+        raw = await es.indices.get_mapping(index=index)
+        _, raw = raw.popitem()
+        self._update_from_dict(raw["mappings"])
+
+    async def save(self, index: str, using: AsyncUsingType = "default") -> None:
+        from .index import AsyncIndex
+
+        i = AsyncIndex(index, using=using)
+        i.mapping(self)
+        await i.save()
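+
+# Illustrative usage sketch (not part of the library): pulling an existing
+# mapping from elasticsearch and saving it to another index; the index names
+# are made up:
+#
+#     m = await AsyncMapping.from_es("blog")
+#     await m.save("blog-copy")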
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/search.py 9.2.0-1/elasticsearch/dsl/_async/search.py
--- 8.17.2-2/elasticsearch/dsl/_async/search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,237 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import contextlib
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncIterator,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    cast,
+)
+
+from typing_extensions import Self
+
+from elasticsearch.exceptions import ApiError
+from elasticsearch.helpers import async_scan
+
+from ..async_connections import get_connection
+from ..response import Response
+from ..search_base import MultiSearchBase, SearchBase
+from ..utils import _R, AsyncUsingType, AttrDict
+
+
+class AsyncSearch(SearchBase[_R]):
+    _using: AsyncUsingType
+
+    def __aiter__(self) -> AsyncIterator[_R]:
+        """
+        Iterate over the hits.
+        """
+
+        class ResultsIterator(AsyncIterator[_R]):
+            def __init__(self, search: AsyncSearch[_R]):
+                self.search = search
+                self.iterator: Optional[Iterator[_R]] = None
+
+            async def __anext__(self) -> _R:
+                if self.iterator is None:
+                    self.iterator = iter(await self.search.execute())
+                try:
+                    return next(self.iterator)
+                except StopIteration:
+                    raise StopAsyncIteration()
+
+        return ResultsIterator(self)
+
+    async def count(self) -> int:
+        """
+        Return the number of hits matching the query and filters. Note that
+        only the actual number is returned.
+        """
+        if hasattr(self, "_response") and self._response.hits.total.relation == "eq":  # type: ignore[attr-defined]
+            return cast(int, self._response.hits.total.value)  # type: ignore[attr-defined]
+
+        es = get_connection(self._using)
+
+        d = self.to_dict(count=True)
+        # TODO: failed shards detection
+        resp = await es.count(
+            index=self._index,
+            query=cast(Optional[Dict[str, Any]], d.get("query", None)),
+            **self._params,
+        )
+
+        return cast(int, resp["count"])
+
+    async def execute(self, ignore_cache: bool = False) -> Response[_R]:
+        """
+        Execute the search and return an instance of ``Response`` wrapping all
+        the data.
+
+        :arg ignore_cache: if set to ``True``, consecutive calls will hit
+            ES instead of using the cached result. Defaults to ``False``
+        """
+        if ignore_cache or not hasattr(self, "_response"):
+            es = get_connection(self._using)
+
+            self._response = self._response_class(
+                self,
+                (
+                    await es.search(
+                        index=self._index, body=self.to_dict(), **self._params
+                    )
+                ).body,
+            )
+        return self._response
+
+    async def scan(self) -> AsyncIterator[_R]:
+        """
+        Turn the search into a scan search and return a generator that will
+        iterate over all the documents matching the query.
+
+        Use the ``params`` method to specify any additional arguments you wish to
+        pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
+        https://elasticsearch-py.readthedocs.io/en/latest/helpers.html#scan
+
+        The ``iterate()`` method should be preferred, as it provides similar
+        functionality using an Elasticsearch point in time.
+        """
+        es = get_connection(self._using)
+
+        async for hit in async_scan(
+            es, query=self.to_dict(), index=self._index, **self._params
+        ):
+            yield self._get_result(cast(AttrDict[Any], hit))
+
+    async def delete(self) -> AttrDict[Any]:
+        """
+        ``delete()`` executes the query by delegating to ``delete_by_query()``.
+
+        Use the ``params`` method to specify any additional arguments you wish to
+        pass to the underlying ``delete_by_query`` helper from ``elasticsearch-py`` -
+        https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query
+        """
+
+        es = get_connection(self._using)
+        assert self._index is not None
+
+        return AttrDict(
+            cast(
+                Dict[str, Any],
+                await es.delete_by_query(
+                    index=self._index, body=self.to_dict(), **self._params
+                ),
+            )
+        )
+
+    @contextlib.asynccontextmanager
+    async def point_in_time(self, keep_alive: str = "1m") -> AsyncIterator[Self]:
+        """
+        Open a point in time (pit) that can be used across several searches.
+
+        This method implements a context manager that returns a search object
+        configured to operate within the created pit.
+
+        :arg keep_alive: the time to live for the point in time, renewed with each search request
+        """
+        es = get_connection(self._using)
+
+        pit = await es.open_point_in_time(
+            index=self._index or "*", keep_alive=keep_alive
+        )
+        search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive})
+        if not search._sort:
+            search = search.sort("_shard_doc")
+        yield search
+        await es.close_point_in_time(id=pit["id"])
+
+    async def iterate(self, keep_alive: str = "1m") -> AsyncIterator[_R]:
+        """
+        Return a generator that iterates over all the documents matching the query.
+
+        This method uses a point in time to provide consistent results even when
+        the index is changing. It should be preferred over ``scan()``.
+
+        :arg keep_alive: the time to live for the point in time, renewed with each new search request
+        """
+        async with self.point_in_time(keep_alive=keep_alive) as s:
+            while True:
+                r = await s.execute()
+                for hit in r:
+                    yield hit
+                if len(r.hits) == 0:
+                    break
+                s = s.search_after()
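+
+    # Illustrative usage sketch (not part of the library): iterating over all
+    # hits with a point in time, assuming a populated AsyncSearch instance s
+    # (e.g. obtained from an AsyncDocument's search() method):
+    #
+    #     async for hit in s.iterate(keep_alive="2m"):
+    #         print(hit.meta.id)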
+
+
+class AsyncMultiSearch(MultiSearchBase[_R]):
+    """
+    Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single
+    request.
+    """
+
+    _using: AsyncUsingType
+
+    if TYPE_CHECKING:
+
+        def add(self, search: AsyncSearch[_R]) -> Self: ...  # type: ignore[override]
+
+    async def execute(
+        self, ignore_cache: bool = False, raise_on_error: bool = True
+    ) -> List[Response[_R]]:
+        """
+        Execute the multi search request and return a list of search results.
+        """
+        if ignore_cache or not hasattr(self, "_response"):
+            es = get_connection(self._using)
+
+            responses = await es.msearch(
+                index=self._index, body=self.to_dict(), **self._params
+            )
+
+            out: List[Response[_R]] = []
+            for s, r in zip(self._searches, responses["responses"]):
+                if r.get("error", False):
+                    if raise_on_error:
+                        raise ApiError("N/A", meta=responses.meta, body=r)
+                    r = None
+                else:
+                    r = Response(s, r)
+                out.append(r)
+
+            self._response = out
+
+        return self._response
+
+
+class AsyncEmptySearch(AsyncSearch[_R]):
+    async def count(self) -> int:
+        return 0
+
+    async def execute(self, ignore_cache: bool = False) -> Response[_R]:
+        return self._response_class(self, {"hits": {"total": 0, "hits": []}})
+
+    async def scan(self) -> AsyncIterator[_R]:
+        return
+        yield  # a bit strange, but this forces an empty generator function
+
+    async def delete(self) -> AttrDict[Any]:
+        return AttrDict[Any]({})
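+
+# Illustrative usage sketch (not part of the library): combining two searches
+# into a single msearch request; the Post document and its fields are assumed:
+#
+#     ms = AsyncMultiSearch(index="blog")
+#     ms = ms.add(Post.search().filter("term", published=True))
+#     ms = ms.add(Post.search().query("match", title="python"))
+#     first, second = await ms.execute()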
diff -pruN 8.17.2-2/elasticsearch/dsl/_async/update_by_query.py 9.2.0-1/elasticsearch/dsl/_async/update_by_query.py
--- 8.17.2-2/elasticsearch/dsl/_async/update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_async/update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,47 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING
+
+from ..async_connections import get_connection
+from ..update_by_query_base import UpdateByQueryBase
+from ..utils import _R, AsyncUsingType
+
+if TYPE_CHECKING:
+    from ..response import UpdateByQueryResponse
+
+
+class AsyncUpdateByQuery(UpdateByQueryBase[_R]):
+    _using: AsyncUsingType
+
+    async def execute(self) -> "UpdateByQueryResponse[_R]":
+        """
+        Execute the search and return an instance of ``Response`` wrapping all
+        the data.
+        """
+        es = get_connection(self._using)
+        assert self._index is not None
+
+        self._response = self._response_class(
+            self,
+            (
+                await es.update_by_query(
+                    index=self._index, **self.to_dict(), **self._params
+                )
+            ).body,
+        )
+        return self._response
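+
+# Illustrative usage sketch (not part of the library): running an update by
+# query obtained from an index object; the index name and script are made up:
+#
+#     ubq = AsyncIndex("blog").updateByQuery()
+#     ubq = ubq.script(source="ctx._source.views = 0")
+#     response = await ubq.execute()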
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/__init__.py 9.2.0-1/elasticsearch/dsl/_sync/__init__.py
--- 8.17.2-2/elasticsearch/dsl/_sync/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/document.py 9.2.0-1/elasticsearch/dsl/_sync/document.py
--- 8.17.2-2/elasticsearch/dsl/_sync/document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,599 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Tuple,
+    Union,
+    cast,
+)
+
+from typing_extensions import Self, dataclass_transform
+
+from elasticsearch.exceptions import NotFoundError, RequestError
+from elasticsearch.helpers import bulk
+
+from .._sync.index import Index
+from ..connections import get_connection
+from ..document_base import DocumentBase, DocumentMeta, mapped_field
+from ..exceptions import IllegalOperation
+from ..utils import DOC_META_FIELDS, META_FIELDS, UsingType, merge
+from .search import Search
+
+if TYPE_CHECKING:
+    from elasticsearch import Elasticsearch
+    from elasticsearch.esql.esql import ESQLBase
+
+
+class IndexMeta(DocumentMeta):
+    _index: Index
+
+    # global flag to guard us from associating an Index with the base Document
+    # class, only user defined subclasses should have an _index attr
+    _document_initialized = False
+
+    def __new__(
+        cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]
+    ) -> "IndexMeta":
+        new_cls = super().__new__(cls, name, bases, attrs)
+        if cls._document_initialized:
+            index_opts = attrs.pop("Index", None)
+            index = cls.construct_index(index_opts, bases)
+            new_cls._index = index
+            index.document(new_cls)
+        cls._document_initialized = True
+        return cast(IndexMeta, new_cls)
+
+    @classmethod
+    def construct_index(cls, opts: Dict[str, Any], bases: Tuple[type, ...]) -> Index:
+        if opts is None:
+            for b in bases:
+                if hasattr(b, "_index"):
+                    return b._index
+
+            # Set None as Index name so it will set _all while making the query
+            return Index(name=None)
+
+        i = Index(getattr(opts, "name", "*"), using=getattr(opts, "using", "default"))
+        i.settings(**getattr(opts, "settings", {}))
+        i.aliases(**getattr(opts, "aliases", {}))
+        for a in getattr(opts, "analyzers", ()):
+            i.analyzer(a)
+        return i
+
+
+@dataclass_transform(field_specifiers=(mapped_field,))
+class Document(DocumentBase, metaclass=IndexMeta):
+    """
+    Model-like class for persisting documents in elasticsearch.
+    """
+
+    if TYPE_CHECKING:
+        _index: Index
+
+    @classmethod
+    def _get_using(cls, using: Optional[UsingType] = None) -> UsingType:
+        return using or cls._index._using
+
+    @classmethod
+    def _get_connection(cls, using: Optional[UsingType] = None) -> "Elasticsearch":
+        return get_connection(cls._get_using(using))
+
+    @classmethod
+    def init(
+        cls, index: Optional[str] = None, using: Optional[UsingType] = None
+    ) -> None:
+        """
+        Create the index and populate the mappings in elasticsearch.
+        """
+        i = cls._index
+        if index:
+            i = i.clone(name=index)
+        i.save(using=using)
+
+    @classmethod
+    def search(
+        cls, using: Optional[UsingType] = None, index: Optional[str] = None
+    ) -> Search[Self]:
+        """
+        Create an :class:`~elasticsearch.dsl.Search` instance that will search
+        over this ``Document``.
+        """
+        s = Search[Self](
+            using=cls._get_using(using), index=cls._default_index(index), doc_type=[cls]
+        )
+        return s.source(exclude_vectors=False)
+
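+    # Usage sketch (illustrative only; ``BlogPost``, its field and the index
+    # name are assumptions for the example, not part of this module):
+    #
+    #     class BlogPost(Document):
+    #         title = Text()
+    #
+    #         class Index:
+    #             name = "blog-posts"
+    #
+    #     BlogPost.init()  # create the index and mappings
+    #     for hit in BlogPost.search().query("match", title="python"):
+    #         print(hit.title)
+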
+    @classmethod
+    def get(
+        cls,
+        id: str,
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Optional[Self]:
+        """
+        Retrieve a single document from elasticsearch using its ``id``.
+
+        :arg id: ``id`` of the document to be retrieved
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.get`` unchanged.
+        """
+        es = cls._get_connection(using)
+        doc = es.get(index=cls._default_index(index), id=id, **kwargs)
+        if not doc.get("found", False):
+            return None
+        return cls.from_es(doc)
+
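+    # Usage sketch (illustrative only; assumes a hypothetical ``BlogPost``
+    # Document subclass):
+    #
+    #     post = BlogPost.get(id="42")  # returns None when the id is missing
+    #     if post is None and not BlogPost.exists(id="42"):
+    #         print("document 42 does not exist")
+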
+    @classmethod
+    def exists(
+        cls,
+        id: str,
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        **kwargs: Any,
+    ) -> bool:
+        """
+        Check whether a single document exists in elasticsearch, using its ``id``.
+
+        :arg id: ``id`` of the document whose existence should be checked
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.exists`` unchanged.
+        """
+        es = cls._get_connection(using)
+        return bool(es.exists(index=cls._default_index(index), id=id, **kwargs))
+
+    @classmethod
+    def mget(
+        cls,
+        docs: List[Dict[str, Any]],
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        raise_on_error: bool = True,
+        missing: str = "none",
+        **kwargs: Any,
+    ) -> List[Optional[Self]]:
+        r"""
+        Retrieve multiple documents by their ``id``\s. Returns a list of instances
+        in the same order as requested.
+
+        :arg docs: list of ``id``\s of the documents to be retrieved or a list
+            of document specifications as per
+            https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg missing: what to do when one of the documents requested is not
+            found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise
+            ``NotFoundError``) or ``'skip'`` (ignore the missing document).
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.mget`` unchanged.
+        """
+        if missing not in ("raise", "skip", "none"):
+            raise ValueError("'missing' must be 'raise', 'skip', or 'none'.")
+        es = cls._get_connection(using)
+        body = {
+            "docs": [
+                doc if isinstance(doc, collections.abc.Mapping) else {"_id": doc}
+                for doc in docs
+            ]
+        }
+        results = es.mget(index=cls._default_index(index), body=body, **kwargs)
+
+        objs: List[Optional[Self]] = []
+        error_docs: List[Self] = []
+        missing_docs: List[Self] = []
+        for doc in results["docs"]:
+            if doc.get("found"):
+                if error_docs or missing_docs:
+                    # We're going to raise an exception anyway, so avoid an
+                    # expensive call to cls.from_es().
+                    continue
+
+                objs.append(cls.from_es(doc))
+
+            elif doc.get("error"):
+                if raise_on_error:
+                    error_docs.append(doc)
+                if missing == "none":
+                    objs.append(None)
+
+            # The doc didn't cause an error, but the doc also wasn't found.
+            elif missing == "raise":
+                missing_docs.append(doc)
+            elif missing == "none":
+                objs.append(None)
+
+        if error_docs:
+            error_ids = [doc["_id"] for doc in error_docs]
+            message = "Required routing not provided for documents %s."
+            message %= ", ".join(error_ids)
+            raise RequestError(400, message, error_docs)  # type: ignore[arg-type]
+        if missing_docs:
+            missing_ids = [doc["_id"] for doc in missing_docs]
+            message = f"Documents {', '.join(missing_ids)} not found."
+            raise NotFoundError(404, message, {"docs": missing_docs})  # type: ignore[arg-type]
+        return objs
+
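+    # Usage sketch (illustrative only; ``BlogPost`` is an assumed example
+    # class). mget() preserves the requested order, and with the default
+    # ``missing='none'`` any id that is not found maps to ``None``:
+    #
+    #     posts = BlogPost.mget(["1", "2", "does-not-exist"])
+    #     # -> [<BlogPost 1>, <BlogPost 2>, None]
+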
+    def delete(
+        self,
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        **kwargs: Any,
+    ) -> None:
+        """
+        Delete the instance in elasticsearch.
+
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.delete`` unchanged.
+        """
+        es = self._get_connection(using)
+        # extract routing etc from meta
+        doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        # Optimistic concurrency control
+        if "seq_no" in self.meta and "primary_term" in self.meta:
+            doc_meta["if_seq_no"] = self.meta["seq_no"]
+            doc_meta["if_primary_term"] = self.meta["primary_term"]
+
+        doc_meta.update(kwargs)
+        i = self._get_index(index)
+        assert i is not None
+
+        es.delete(index=i, **doc_meta)
+
+    def update(
+        self,
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        detect_noop: bool = True,
+        doc_as_upsert: bool = False,
+        refresh: bool = False,
+        retry_on_conflict: Optional[int] = None,
+        script: Optional[Union[str, Dict[str, Any]]] = None,
+        script_id: Optional[str] = None,
+        scripted_upsert: bool = False,
+        upsert: Optional[Dict[str, Any]] = None,
+        return_doc_meta: bool = False,
+        **fields: Any,
+    ) -> Any:
+        """
+        Partial update of the document, specify fields you wish to update and
+        both the instance and the document in elasticsearch will be updated::
+
+            doc = MyDocument(title='Document Title!')
+            doc.save()
+            doc.update(title='New Document Title!')
+
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg detect_noop: Set to ``False`` to disable noop detection.
+        :arg refresh: Control when the changes made by this request are visible
+            to search. Set to ``True`` for immediate effect.
+        :arg retry_on_conflict: In between the get and indexing phases of the
+            update, it is possible that another process might have already
+            updated the same document. By default, the update will fail with a
+            version conflict exception. The retry_on_conflict parameter
+            controls how many times to retry the update before finally throwing
+            an exception.
+        :arg doc_as_upsert:  Instead of sending a partial doc plus an upsert
+            doc, setting doc_as_upsert to true will use the contents of doc as
+            the upsert value
+        :arg script: the source code of the script as a string, or a dictionary
+            with script attributes to update.
+        :arg return_doc_meta: set to ``True`` to return all metadata from the
+            index API call instead of only the operation result
+
+        :return: operation result noop/updated
+        """
+        body: Dict[str, Any] = {
+            "doc_as_upsert": doc_as_upsert,
+            "detect_noop": detect_noop,
+        }
+
+        # scripted update
+        if script or script_id:
+            if upsert is not None:
+                body["upsert"] = upsert
+
+            if script:
+                if isinstance(script, str):
+                    script = {"source": script}
+            else:
+                script = {"id": script_id}
+
+            if "params" not in script:
+                script["params"] = fields
+            else:
+                script["params"].update(fields)
+
+            body["script"] = script
+            body["scripted_upsert"] = scripted_upsert
+
+        # partial document update
+        else:
+            if not fields:
+                raise IllegalOperation(
+                    "You cannot call update() without updating individual fields or a script. "
+                    "If you wish to update the entire object use save()."
+                )
+
+            # update given fields locally
+            merge(self, fields)
+
+            # prepare data for ES
+            values = self.to_dict(skip_empty=False)
+
+            # if fields were given: partial update
+            body["doc"] = {k: values.get(k) for k in fields.keys()}
+
+        # extract routing etc from meta
+        doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        if retry_on_conflict is not None:
+            doc_meta["retry_on_conflict"] = retry_on_conflict
+
+        # Optimistic concurrency control
+        if (
+            retry_on_conflict in (None, 0)
+            and "seq_no" in self.meta
+            and "primary_term" in self.meta
+        ):
+            doc_meta["if_seq_no"] = self.meta["seq_no"]
+            doc_meta["if_primary_term"] = self.meta["primary_term"]
+
+        i = self._get_index(index)
+        assert i is not None
+
+        meta = self._get_connection(using).update(
+            index=i, body=body, refresh=refresh, **doc_meta
+        )
+
+        # update meta information from ES
+        for k in META_FIELDS:
+            if "_" + k in meta:
+                setattr(self.meta, k, meta["_" + k])
+
+        return meta if return_doc_meta else meta["result"]
+
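+    # Usage sketch (illustrative only; ``post`` is an assumed document
+    # instance). Extra keyword arguments become script params in the
+    # scripted form:
+    #
+    #     post.update(title="New title")  # partial document update
+    #     post.update(script="ctx._source.views += params.n", n=1)
+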
+    def save(
+        self,
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        validate: bool = True,
+        skip_empty: bool = True,
+        return_doc_meta: bool = False,
+        **kwargs: Any,
+    ) -> Any:
+        """
+        Save the document into elasticsearch. If the document doesn't exist it
+        is created, otherwise it is overwritten. Returns ``True`` if this
+        operation resulted in a new document being created.
+
+        :arg index: elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg validate: set to ``False`` to skip validating the document
+        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
+            ``[]``, ``{}``) to be left on the document. Those values will be
+            stripped out otherwise as they make no difference in elasticsearch.
+        :arg return_doc_meta: set to ``True`` to return all metadata from the
+            update API call instead of only the operation result
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.index`` unchanged.
+
+        :return: operation result created/updated
+        """
+        if validate:
+            self.full_clean()
+
+        es = self._get_connection(using)
+        # extract routing etc from meta
+        doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        # Optimistic concurrency control
+        if "seq_no" in self.meta and "primary_term" in self.meta:
+            doc_meta["if_seq_no"] = self.meta["seq_no"]
+            doc_meta["if_primary_term"] = self.meta["primary_term"]
+
+        doc_meta.update(kwargs)
+        i = self._get_index(index)
+        assert i is not None
+
+        meta = es.index(
+            index=i,
+            body=self.to_dict(skip_empty=skip_empty),
+            **doc_meta,
+        )
+        # update meta information from ES
+        for k in META_FIELDS:
+            if "_" + k in meta:
+                setattr(self.meta, k, meta["_" + k])
+
+        return meta if return_doc_meta else meta["result"]
+
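+    # Usage sketch (illustrative only; ``BlogPost`` and its field are assumed
+    # for the example). Extra keyword arguments such as ``refresh`` are passed
+    # through to ``Elasticsearch.index``:
+    #
+    #     post = BlogPost(meta={"id": "42"}, title="Hello world")
+    #     result = post.save(refresh=True)  # 'created' or 'updated'
+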
+    @classmethod
+    def bulk(
+        cls,
+        actions: Iterable[Union[Self, Dict[str, Any]]],
+        using: Optional[UsingType] = None,
+        index: Optional[str] = None,
+        validate: bool = True,
+        skip_empty: bool = True,
+        **kwargs: Any,
+    ) -> Tuple[int, Union[int, List[Any]]]:
+        """
+        Perform multiple indexing operations in a single request.
+
+        :arg actions: an iterable of document instances to be indexed and/or
+            bulk operation dictionaries.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg index: Elasticsearch index to use, if the ``Document`` is
+            associated with an index this can be omitted.
+        :arg validate: set to ``False`` to skip validating the documents
+        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
+            ``[]``, ``{}``) to be left on the document. Those values will be
+            stripped out otherwise as they make no difference in Elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.bulk`` unchanged.
+
+        :return: bulk operation results
+        """
+        es = cls._get_connection(using)
+
+        i = cls._default_index(index)
+        assert i is not None
+
+        class Generate:
+            def __init__(
+                self,
+                doc_iterator: Iterable[Union[Document, Dict[str, Any]]],
+            ):
+                self.doc_iterator = doc_iterator.__iter__()
+
+            def __iter__(self) -> Self:
+                return self
+
+            def __next__(self) -> Dict[str, Any]:
+                doc: Optional[Union[Document, Dict[str, Any]]] = (
+                    self.doc_iterator.__next__()
+                )
+
+                if isinstance(doc, dict):
+                    action = doc
+                    doc = None
+                    if "_source" in action and isinstance(action["_source"], Document):
+                        doc = action["_source"]
+                        if validate:  # pragma: no cover
+                            doc.full_clean()
+                        action["_source"] = doc.to_dict(
+                            include_meta=False, skip_empty=skip_empty
+                        )
+                elif doc is not None:
+                    if validate:  # pragma: no cover
+                        doc.full_clean()
+                    action = doc.to_dict(include_meta=True, skip_empty=skip_empty)
+                if "_index" not in action:
+                    action["_index"] = i
+                return action
+
+        return bulk(es, Generate(actions), **kwargs)
+
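+    # Usage sketch (illustrative only; ``BlogPost`` instances are assumed).
+    # The return value mirrors ``elasticsearch.helpers.bulk``:
+    #
+    #     ok, errors = BlogPost.bulk(
+    #         (BlogPost(title=f"post {i}") for i in range(3)),
+    #         refresh=True,
+    #     )
+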
+    @classmethod
+    def esql_execute(
+        cls,
+        query: "ESQLBase",
+        return_additional: bool = False,
+        ignore_missing_fields: bool = False,
+        using: Optional[UsingType] = None,
+        **kwargs: Any,
+    ) -> Iterator[Union[Self, Tuple[Self, Dict[str, Any]]]]:
+        """
+        Execute the given ES|QL query and return an iterator of matching
+        documents or, when ``return_additional`` is set, of 2-element tuples
+        where the first element is an instance of this ``Document`` and the
+        second a dictionary with any remaining columns requested in the query.
+
+        :arg query: an ES|QL query object created with the ``esql_from()`` method.
+        :arg return_additional: if ``False`` (the default), this method returns
+            document objects. If set to ``True``, the method returns tuples with
+            a document in the first element and a dictionary with any additional
+            columns returned by the query in the second element.
+        :arg ignore_missing_fields: if ``False`` (the default), all the fields of
+            the document must be present in the query, or else an exception is
+            raised. Set to ``True`` to allow missing fields, which will result in
+            partially initialized document objects.
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg kwargs: additional options for the ``client.esql.query()`` function.
+        """
+        es = cls._get_connection(using)
+        response = es.esql.query(query=str(query), **kwargs)
+        query_columns = [col["name"] for col in response.body.get("columns", [])]
+
+        # Here we get the list of columns defined in the document, which are the
+        # columns that we will take from each result to assemble the document
+        # object.
+        # When `for_esql=False` is passed below by default, the list will include
+        # nested fields, which ES|QL does not return, causing an error. When passing
+        # `ignore_missing_fields=True` the list will be generated with
+        # `for_esql=True`, so the error will not occur, but the documents will
+        # not have any Nested objects in them.
+        doc_fields = set(cls._get_field_names(for_esql=ignore_missing_fields))
+        if not ignore_missing_fields and not doc_fields.issubset(set(query_columns)):
+            raise ValueError(
+                f"Not all fields of {cls.__name__} were returned by the query. "
+                "Make sure your document does not use Nested fields, which are "
+                "currently not supported in ES|QL. To force the query to be "
+                "evaluated in spite of the missing fields, set the "
+                "ignore_missing_fields=True option in the esql_execute() call."
+            )
+        non_doc_fields: set[str] = set(query_columns) - doc_fields - {"_id"}
+        index_id = query_columns.index("_id")
+
+        results = response.body.get("values", [])
+        for column_values in results:
+            # create a dictionary with all the document fields, expanding the
+            # dot notation returned by ES|QL into the recursive dictionaries
+            # used by Document.from_dict()
+            doc_dict: Dict[str, Any] = {}
+            for col, val in zip(query_columns, column_values):
+                if col in doc_fields:
+                    cols = col.split(".")
+                    d = doc_dict
+                    for c in cols[:-1]:
+                        if c not in d:
+                            d[c] = {}
+                        d = d[c]
+                    d[cols[-1]] = val
+
+            # create the document instance
+            obj = cls(meta={"_id": column_values[index_id]})
+            obj._from_dict(doc_dict)
+
+            if return_additional:
+                # build a dict with any other values included in the response
+                other = {
+                    col: val
+                    for col, val in zip(query_columns, column_values)
+                    if col in non_doc_fields
+                }
+
+                yield obj, other
+            else:
+                yield obj
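+# Usage sketch (illustrative only; ``BlogPost`` is an assumed Document
+# subclass and the chained query builder methods are assumptions for the
+# example):
+#
+#     query = BlogPost.esql_from().limit(10)
+#     for post in BlogPost.esql_execute(query):
+#         print(post.meta.id, post.title)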
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/faceted_search.py 9.2.0-1/elasticsearch/dsl/_sync/faceted_search.py
--- 8.17.2-2/elasticsearch/dsl/_sync/faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,50 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING
+
+from ..faceted_search_base import FacetedResponse, FacetedSearchBase
+from ..utils import _R
+from .search import Search
+
+if TYPE_CHECKING:
+    from ..response import Response
+
+
+class FacetedSearch(FacetedSearchBase[_R]):
+    _s: Search[_R]
+
+    def count(self) -> int:
+        return self._s.count()
+
+    def search(self) -> Search[_R]:
+        """
+        Returns the base Search object to which the facets are added.
+
+        You can customize the query by overriding this method and returning a
+        modified search object.
+        """
+        s = Search[_R](doc_type=self.doc_types, index=self.index, using=self.using)
+        return s.response_class(FacetedResponse)
+
+    def execute(self) -> "Response[_R]":
+        """
+        Execute the search and return the response.
+        """
+        r = self._s.execute()
+        r._faceted_search = self
+        return r
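+# Usage sketch (illustrative only; the subclass, index and facet definitions
+# below are assumptions for the example):
+#
+#     class BlogSearch(FacetedSearch):
+#         index = "blog-posts"
+#         fields = ["title", "body"]
+#         facets = {"tags": TermsFacet(field="tags")}
+#
+#     response = BlogSearch("python").execute()
+#     for tag, count, selected in response.facets.tags:
+#         print(tag, count, selected)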
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/index.py 9.2.0-1/elasticsearch/dsl/_sync/index.py
--- 8.17.2-2/elasticsearch/dsl/_sync/index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,597 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from typing_extensions import Self
+
+from ..connections import get_connection
+from ..exceptions import IllegalOperation
+from ..index_base import IndexBase
+from ..utils import UsingType
+from .mapping import Mapping
+from .search import Search
+from .update_by_query import UpdateByQuery
+
+if TYPE_CHECKING:
+    from elastic_transport import ObjectApiResponse
+
+    from elasticsearch import Elasticsearch
+
+
+class IndexTemplate:
+    def __init__(
+        self,
+        name: str,
+        template: str,
+        index: Optional["Index"] = None,
+        order: Optional[int] = None,
+        **kwargs: Any,
+    ):
+        if index is None:
+            self._index = Index(template, **kwargs)
+        else:
+            if kwargs:
+                raise ValueError(
+                    "You cannot specify options for Index when"
+                    " passing an Index instance."
+                )
+            self._index = index.clone()
+            self._index._name = template
+        self._template_name = name
+        self.order = order
+
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._index, attr_name)
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = self._index.to_dict()
+        d["index_patterns"] = [self._index._name]
+        if self.order is not None:
+            d["order"] = self.order
+        return d
+
+    def save(self, using: Optional[UsingType] = None) -> "ObjectApiResponse[Any]":
+        es = get_connection(using or self._index._using)
+        return es.indices.put_template(name=self._template_name, body=self.to_dict())
+
+
+class ComposableIndexTemplate:
+    def __init__(
+        self,
+        name: str,
+        template: str,
+        index: Optional["Index"] = None,
+        priority: Optional[int] = None,
+        **kwargs: Any,
+    ):
+        if index is None:
+            self._index = Index(template, **kwargs)
+        else:
+            if kwargs:
+                raise ValueError(
+                    "You cannot specify options for Index when"
+                    " passing an Index instance."
+                )
+            self._index = index.clone()
+            self._index._name = template
+        self._template_name = name
+        self.priority = priority
+
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._index, attr_name)
+
+    def to_dict(self) -> Dict[str, Any]:
+        d: Dict[str, Any] = {"template": self._index.to_dict()}
+        d["index_patterns"] = [self._index._name]
+        if self.priority is not None:
+            d["priority"] = self.priority
+        return d
+
+    def save(self, using: Optional[UsingType] = None) -> "ObjectApiResponse[Any]":
+        es = get_connection(using or self._index._using)
+        return es.indices.put_index_template(name=self._template_name, **self.to_dict())
+
+
+class Index(IndexBase):
+    _using: UsingType
+
+    if TYPE_CHECKING:
+
+        def get_or_create_mapping(self) -> Mapping: ...
+
+    def __init__(self, name: str, using: UsingType = "default"):
+        """
+        :arg name: name of the index
+        :arg using: connection alias to use, defaults to ``'default'``
+        """
+        super().__init__(name, Mapping, using=using)
+
+    def _get_connection(self, using: Optional[UsingType] = None) -> "Elasticsearch":
+        if self._name is None:
+            raise ValueError("You cannot perform API calls on the default index.")
+        return get_connection(using or self._using)
+
+    connection = property(_get_connection)
+
+    def as_template(
+        self,
+        template_name: str,
+        pattern: Optional[str] = None,
+        order: Optional[int] = None,
+    ) -> IndexTemplate:
+        return IndexTemplate(
+            template_name, pattern or self._name, index=self, order=order
+        )
+
+    def as_composable_template(
+        self,
+        template_name: str,
+        pattern: Optional[str] = None,
+        priority: Optional[int] = None,
+    ) -> ComposableIndexTemplate:
+        return ComposableIndexTemplate(
+            template_name, pattern or self._name, index=self, priority=priority
+        )
+
+    def load_mappings(self, using: Optional[UsingType] = None) -> None:
+        self.get_or_create_mapping().update_from_es(
+            self._name, using=using or self._using
+        )
+
+    def clone(
+        self, name: Optional[str] = None, using: Optional[UsingType] = None
+    ) -> Self:
+        """
+        Create a copy of the instance with another name or connection alias.
+        Useful for creating multiple indices with shared configuration::
+
+            i = Index('base-index')
+            i.settings(number_of_shards=1)
+            i.create()
+
+            i2 = i.clone('other-index')
+            i2.create()
+
+        :arg name: name of the index
+        :arg using: connection alias to use, defaults to ``'default'``
+        """
+        i = self.__class__(name or self._name, using=using or self._using)
+        i._settings = self._settings.copy()
+        i._aliases = self._aliases.copy()
+        i._analysis = self._analysis.copy()
+        i._doc_types = self._doc_types[:]
+        if self._mapping is not None:
+            i._mapping = self._mapping._clone()
+        return i
+
+    def search(self, using: Optional[UsingType] = None) -> Search:
+        """
+        Return a :class:`~elasticsearch.dsl.Search` object searching over the
+        index (or all the indices belonging to this template) and its
+        ``Document``\\s.
+        """
+        return Search(
+            using=using or self._using, index=self._name, doc_type=self._doc_types
+        )
+
+    def updateByQuery(self, using: Optional[UsingType] = None) -> UpdateByQuery:
+        """
+        Return a :class:`~elasticsearch.dsl.UpdateByQuery` object searching over the index
+        (or all the indices belonging to this template) and updating Documents that match
+        the search criteria.
+
+        For more information, see here:
+        https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html
+        """
+        return UpdateByQuery(
+            using=using or self._using,
+            index=self._name,
+        )
+
+    def create(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Creates the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.create`` unchanged.
+        """
+        return self._get_connection(using).indices.create(
+            index=self._name, body=self.to_dict(), **kwargs
+        )
+
+    def is_closed(self, using: Optional[UsingType] = None) -> bool:
+        state = self._get_connection(using).cluster.state(
+            index=self._name, metric="metadata"
+        )
+        return bool(state["metadata"]["indices"][self._name]["state"] == "close")
+
+    def save(
+        self, using: Optional[UsingType] = None
+    ) -> "Optional[ObjectApiResponse[Any]]":
+        """
+        Sync the index definition with elasticsearch, creating the index if it
+        doesn't exist and updating its settings and mappings if it does.
+
+        Note some settings and mapping changes cannot be done on an open
+        index (or at all on an existing index) and for those this method will
+        fail with the underlying exception.
+        """
+        if not self.exists(using=using):
+            return self.create(using=using)
+
+        body = self.to_dict()
+        settings = body.pop("settings", {})
+        analysis = settings.pop("analysis", None)
+        current_settings = (self.get_settings(using=using))[self._name]["settings"][
+            "index"
+        ]
+        if analysis:
+            if self.is_closed(using=using):
+                # closed index, update away
+                settings["analysis"] = analysis
+            else:
+                # compare analysis definition, if all analysis objects are
+                # already defined as requested, skip analysis update and
+                # proceed, otherwise raise IllegalOperation
+                existing_analysis = current_settings.get("analysis", {})
+                if any(
+                    existing_analysis.get(section, {}).get(k, None)
+                    != analysis[section][k]
+                    for section in analysis
+                    for k in analysis[section]
+                ):
+                    raise IllegalOperation(
+                        "You cannot update analysis configuration on an open index, "
+                        "you need to close index %s first." % self._name
+                    )
+
+        # try and update the settings
+        if settings:
+            settings = settings.copy()
+            for k, v in list(settings.items()):
+                if k in current_settings and current_settings[k] == str(v):
+                    del settings[k]
+
+            if settings:
+                self.put_settings(using=using, body=settings)
+
+        # update the mappings, any conflict in the mappings will result in an
+        # exception
+        mappings = body.pop("mappings", {})
+        if mappings:
+            return self.put_mapping(using=using, body=mappings)
+
+        return None
+
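+    # Usage sketch (illustrative only; the index name and settings are
+    # assumptions for the example):
+    #
+    #     blogs = Index("blog-posts")
+    #     blogs.settings(number_of_shards=1, number_of_replicas=0)
+    #     blogs.save()  # creates the index, or updates settings/mappings in place
+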
+    def analyze(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Perform the analysis process on a text and return the tokens breakdown
+        of the text.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.analyze`` unchanged.
+        """
+        return self._get_connection(using).indices.analyze(index=self._name, **kwargs)
+
+    def refresh(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Performs a refresh operation on the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.refresh`` unchanged.
+        """
+        return self._get_connection(using).indices.refresh(index=self._name, **kwargs)
+
+    def flush(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Performs a flush operation on the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.flush`` unchanged.
+        """
+        return self._get_connection(using).indices.flush(index=self._name, **kwargs)
+
+    def get(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The get index API allows retrieving information about the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get`` unchanged.
+        """
+        return self._get_connection(using).indices.get(index=self._name, **kwargs)
+
+    def open(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Opens the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.open`` unchanged.
+        """
+        return self._get_connection(using).indices.open(index=self._name, **kwargs)
+
+    def close(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Closes the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.close`` unchanged.
+        """
+        return self._get_connection(using).indices.close(index=self._name, **kwargs)
+
+    def delete(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Deletes the index in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.delete`` unchanged.
+        """
+        return self._get_connection(using).indices.delete(index=self._name, **kwargs)
+
+    def exists(self, using: Optional[UsingType] = None, **kwargs: Any) -> bool:
+        """
+        Returns ``True`` if the index already exists in elasticsearch.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.exists`` unchanged.
+        """
+        return bool(
+            self._get_connection(using).indices.exists(index=self._name, **kwargs)
+        )
+
+    def put_mapping(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Register specific mapping definition for a specific type.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.put_mapping`` unchanged.
+        """
+        return self._get_connection(using).indices.put_mapping(
+            index=self._name, **kwargs
+        )
+
+    def get_mapping(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve specific mapping definition for a specific type.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_mapping`` unchanged.
+        """
+        return self._get_connection(using).indices.get_mapping(
+            index=self._name, **kwargs
+        )
+
+    def get_field_mapping(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve mapping definition of a specific field.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_field_mapping`` unchanged.
+        """
+        return self._get_connection(using).indices.get_field_mapping(
+            index=self._name, **kwargs
+        )
+
+    def put_alias(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Create an alias for the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.put_alias`` unchanged.
+        """
+        return self._get_connection(using).indices.put_alias(index=self._name, **kwargs)
+
+    def exists_alias(self, using: Optional[UsingType] = None, **kwargs: Any) -> bool:
+        """
+        Return a boolean indicating whether given alias exists for this index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.exists_alias`` unchanged.
+        """
+        return bool(
+            self._get_connection(using).indices.exists_alias(index=self._name, **kwargs)
+        )
+
+    def get_alias(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve a specified alias.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_alias`` unchanged.
+        """
+        return self._get_connection(using).indices.get_alias(index=self._name, **kwargs)
+
+    def delete_alias(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Delete specific alias.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.delete_alias`` unchanged.
+        """
+        return self._get_connection(using).indices.delete_alias(
+            index=self._name, **kwargs
+        )
+
+    def get_settings(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve settings for the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.get_settings`` unchanged.
+        """
+        return self._get_connection(using).indices.get_settings(
+            index=self._name, **kwargs
+        )
+
+    def put_settings(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Change specific index level settings in real time.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.put_settings`` unchanged.
+        """
+        return self._get_connection(using).indices.put_settings(
+            index=self._name, **kwargs
+        )
+
+    def stats(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Retrieve statistics on different operations happening on the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.stats`` unchanged.
+        """
+        return self._get_connection(using).indices.stats(index=self._name, **kwargs)
+
+    def segments(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Provide low level segments information that a Lucene index (shard
+        level) is built with.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.segments`` unchanged.
+        """
+        return self._get_connection(using).indices.segments(index=self._name, **kwargs)
+
+    def validate_query(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Validate a potentially expensive query without executing it.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.validate_query`` unchanged.
+        """
+        return self._get_connection(using).indices.validate_query(
+            index=self._name, **kwargs
+        )
+
+    def clear_cache(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Clear all caches or specific caches associated with the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.clear_cache`` unchanged.
+        """
+        return self._get_connection(using).indices.clear_cache(
+            index=self._name, **kwargs
+        )
+
+    def recovery(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The indices recovery API provides insight into on-going shard
+        recoveries for the index.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.recovery`` unchanged.
+        """
+        return self._get_connection(using).indices.recovery(index=self._name, **kwargs)
+
+    def shard_stores(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        Provides store information for shard copies of the index. Store
+        information reports on which nodes shard copies exist, the shard copy
+        version, indicating how recent they are, and any exceptions encountered
+        while opening the shard index or from earlier engine failure.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.shard_stores`` unchanged.
+        """
+        return self._get_connection(using).indices.shard_stores(
+            index=self._name, **kwargs
+        )
+
+    def forcemerge(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The force merge API allows forcing a merge of the index. The merge
+        relates to the number of segments a Lucene index holds within each
+        shard. The force merge operation reduces the number of segments by
+        merging them.
+
+        This call will block until the merge is complete. If the http
+        connection is lost, the request will continue in the background, and
+        any new requests will block until the previous force merge is complete.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.forcemerge`` unchanged.
+        """
+        return self._get_connection(using).indices.forcemerge(
+            index=self._name, **kwargs
+        )
+
+    def shrink(
+        self, using: Optional[UsingType] = None, **kwargs: Any
+    ) -> "ObjectApiResponse[Any]":
+        """
+        The shrink index API allows you to shrink an existing index into a new
+        index with fewer primary shards. The number of primary shards in the
+        target index must be a factor of the shards in the source index. For
+        example an index with 8 primary shards can be shrunk into 4, 2 or 1
+        primary shards or an index with 15 primary shards can be shrunk into 5,
+        3 or 1. If the number of shards in the index is a prime number it can
+        only be shrunk into a single primary shard. Before shrinking, a
+        (primary or replica) copy of every shard in the index must be present
+        on the same node.
+
+        Any additional keyword arguments will be passed to
+        ``Elasticsearch.indices.shrink`` unchanged.
+        """
+        return self._get_connection(using).indices.shrink(index=self._name, **kwargs)
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/mapping.py 9.2.0-1/elasticsearch/dsl/_sync/mapping.py
--- 8.17.2-2/elasticsearch/dsl/_sync/mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,49 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import List, Optional, Union
+
+from typing_extensions import Self
+
+from ..connections import get_connection
+from ..mapping_base import MappingBase
+from ..utils import UsingType
+
+
+class Mapping(MappingBase):
+    @classmethod
+    def from_es(
+        cls, index: Optional[Union[str, List[str]]], using: UsingType = "default"
+    ) -> Self:
+        m = cls()
+        m.update_from_es(index, using)
+        return m
+
+    def update_from_es(
+        self, index: Optional[Union[str, List[str]]], using: UsingType = "default"
+    ) -> None:
+        es = get_connection(using)
+        raw = es.indices.get_mapping(index=index)
+        _, raw = raw.popitem()
+        self._update_from_dict(raw["mappings"])
+
+    def save(self, index: str, using: UsingType = "default") -> None:
+        from .index import Index
+
+        i = Index(index, using=using)
+        i.mapping(self)
+        i.save()
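+# Usage sketch (illustrative only; the index name and field are assumptions
+# for the example, and ``field()`` is assumed to be provided by the mapping
+# base class):
+#
+#     m = Mapping.from_es("blog-posts")   # pull the live mapping
+#     m.field("category", "keyword")      # add a field locally
+#     m.save("blog-posts")                # push it back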
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/search.py 9.2.0-1/elasticsearch/dsl/_sync/search.py
--- 8.17.2-2/elasticsearch/dsl/_sync/search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,230 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import contextlib
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    cast,
+)
+
+from typing_extensions import Self
+
+from elasticsearch.exceptions import ApiError
+from elasticsearch.helpers import scan
+
+from ..connections import get_connection
+from ..response import Response
+from ..search_base import MultiSearchBase, SearchBase
+from ..utils import _R, AttrDict, UsingType
+
+
+class Search(SearchBase[_R]):
+    _using: UsingType
+
+    def __iter__(self) -> Iterator[_R]:
+        """
+        Iterate over the hits.
+        """
+
+        class ResultsIterator(Iterator[_R]):
+            def __init__(self, search: Search[_R]):
+                self.search = search
+                self.iterator: Optional[Iterator[_R]] = None
+
+            def __next__(self) -> _R:
+                if self.iterator is None:
+                    self.iterator = iter(self.search.execute())
+                try:
+                    return next(self.iterator)
+                except StopIteration:
+                    raise StopIteration()
+
+        return ResultsIterator(self)
+
+    def count(self) -> int:
+        """
+        Return the number of hits matching the query and filters. Note that
+        only the actual number is returned.
+        """
+        if hasattr(self, "_response") and self._response.hits.total.relation == "eq":  # type: ignore[attr-defined]
+            return cast(int, self._response.hits.total.value)  # type: ignore[attr-defined]
+
+        es = get_connection(self._using)
+
+        d = self.to_dict(count=True)
+        # TODO: failed shards detection
+        resp = es.count(
+            index=self._index,
+            query=cast(Optional[Dict[str, Any]], d.get("query", None)),
+            **self._params,
+        )
+
+        return cast(int, resp["count"])
+
+    def execute(self, ignore_cache: bool = False) -> Response[_R]:
+        """
+        Execute the search and return an instance of ``Response`` wrapping all
+        the data.
+
+        :arg ignore_cache: if set to ``True``, consecutive calls will hit
+            Elasticsearch, ignoring any cached result. Defaults to ``False``
+        """
+        if ignore_cache or not hasattr(self, "_response"):
+            es = get_connection(self._using)
+
+            self._response = self._response_class(
+                self,
+                (
+                    es.search(index=self._index, body=self.to_dict(), **self._params)
+                ).body,
+            )
+        return self._response
+
+    def scan(self) -> Iterator[_R]:
+        """
+        Turn the search into a scan search and return a generator that will
+        iterate over all the documents matching the query.
+
+        Use the ``params`` method to specify any additional arguments you wish to
+        pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
+        https://elasticsearch-py.readthedocs.io/en/latest/helpers.html#scan
+
+        The ``iterate()`` method should be preferred, as it provides similar
+        functionality using an Elasticsearch point in time.
+        """
+        es = get_connection(self._using)
+
+        for hit in scan(es, query=self.to_dict(), index=self._index, **self._params):
+            yield self._get_result(cast(AttrDict[Any], hit))
+
+    def delete(self) -> AttrDict[Any]:
+        """
+        ``delete()`` executes the query by delegating to ``delete_by_query()``.
+
+        Use the ``params`` method to specify any additional arguments you wish to
+        pass to the underlying ``delete_by_query`` helper from ``elasticsearch-py`` -
+        https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query
+        """
+
+        es = get_connection(self._using)
+        assert self._index is not None
+
+        return AttrDict(
+            cast(
+                Dict[str, Any],
+                es.delete_by_query(
+                    index=self._index, body=self.to_dict(), **self._params
+                ),
+            )
+        )
+
+    @contextlib.contextmanager
+    def point_in_time(self, keep_alive: str = "1m") -> Iterator[Self]:
+        """
+        Open a point in time (pit) that can be used across several searches.
+
+        This method implements a context manager that returns a search object
+        configured to operate within the created pit.
+
+        :arg keep_alive: the time to live for the point in time, renewed with each search request
+        """
+        es = get_connection(self._using)
+
+        pit = es.open_point_in_time(index=self._index or "*", keep_alive=keep_alive)
+        search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive})
+        if not search._sort:
+            search = search.sort("_shard_doc")
+        yield search
+        es.close_point_in_time(id=pit["id"])
+
+    def iterate(self, keep_alive: str = "1m") -> Iterator[_R]:
+        """
+        Return a generator that iterates over all the documents matching the query.
+
+        This method uses a point in time to provide consistent results even when
+        the index is changing. It should be preferred over ``scan()``.
+
+        :arg keep_alive: the time to live for the point in time, renewed with each new search request
+        """
+        with self.point_in_time(keep_alive=keep_alive) as s:
+            while True:
+                r = s.execute()
+                for hit in r:
+                    yield hit
+                if len(r.hits) == 0:
+                    break
+                s = s.search_after()
+
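+    # Usage sketch (illustrative only; the index and query are assumptions
+    # for the example):
+    #
+    #     s = Search(index="blog-posts").query("match", title="python")
+    #     for hit in s.iterate(keep_alive="2m"):
+    #         print(hit.meta.id)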
+
+class MultiSearch(MultiSearchBase[_R]):
+    """
+    Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single
+    request.
+    """
+
+    _using: UsingType
+
+    if TYPE_CHECKING:
+
+        def add(self, search: Search[_R]) -> Self: ...  # type: ignore[override]
+
+    def execute(
+        self, ignore_cache: bool = False, raise_on_error: bool = True
+    ) -> List[Response[_R]]:
+        """
+        Execute the multi search request and return a list of search results.
+        """
+        if ignore_cache or not hasattr(self, "_response"):
+            es = get_connection(self._using)
+
+            responses = es.msearch(
+                index=self._index, body=self.to_dict(), **self._params
+            )
+
+            out: List[Response[_R]] = []
+            for s, r in zip(self._searches, responses["responses"]):
+                if r.get("error", False):
+                    if raise_on_error:
+                        raise ApiError("N/A", meta=responses.meta, body=r)
+                    r = None
+                else:
+                    r = Response(s, r)
+                out.append(r)
+
+            self._response = out
+
+        return self._response
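+
+    # Editor's illustrative sketch (not part of the library source): combining
+    # several searches into one request, assuming a hypothetical "blogs" index.
+    # Note that ``add()`` returns a new ``MultiSearch`` object.
+    #
+    #   ms = MultiSearch(index="blogs")
+    #   ms = ms.add(Search().filter("term", category="python"))
+    #   ms = ms.add(Search().filter("term", category="search"))
+    #   for response in ms.execute():
+    #       print(response.hits.total)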
+
+
+class EmptySearch(Search[_R]):
+    def count(self) -> int:
+        return 0
+
+    def execute(self, ignore_cache: bool = False) -> Response[_R]:
+        return self._response_class(self, {"hits": {"total": 0, "hits": []}})
+
+    def scan(self) -> Iterator[_R]:
+        return
+        yield  # a bit strange, but this forces an empty generator function
+
+    def delete(self) -> AttrDict[Any]:
+        return AttrDict[Any]({})
diff -pruN 8.17.2-2/elasticsearch/dsl/_sync/update_by_query.py 9.2.0-1/elasticsearch/dsl/_sync/update_by_query.py
--- 8.17.2-2/elasticsearch/dsl/_sync/update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/_sync/update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,45 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING
+
+from ..connections import get_connection
+from ..update_by_query_base import UpdateByQueryBase
+from ..utils import _R, UsingType
+
+if TYPE_CHECKING:
+    from ..response import UpdateByQueryResponse
+
+
+class UpdateByQuery(UpdateByQueryBase[_R]):
+    _using: UsingType
+
+    def execute(self) -> "UpdateByQueryResponse[_R]":
+        """
+        Execute the search and return an instance of ``Response`` wrapping all
+        the data.
+        """
+        es = get_connection(self._using)
+        assert self._index is not None
+
+        self._response = self._response_class(
+            self,
+            (
+                es.update_by_query(index=self._index, **self.to_dict(), **self._params)
+            ).body,
+        )
+        return self._response
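+
+    # Editor's illustrative sketch (not part of the library source): a minimal
+    # update-by-query, assuming a hypothetical "blogs" index with a numeric
+    # ``views`` field and a default connection configured elsewhere.
+    #
+    #   ubq = (
+    #       UpdateByQuery(index="blogs")
+    #       .query("match", title="python")
+    #       .script(source="ctx._source.views += 1", lang="painless")
+    #   )
+    #   response = ubq.execute()
+    #   print(response.updated)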
diff -pruN 8.17.2-2/elasticsearch/dsl/aggs.py 9.2.0-1/elasticsearch/dsl/aggs.py
--- 8.17.2-2/elasticsearch/dsl/aggs.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/aggs.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,3851 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from copy import deepcopy
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Dict,
+    Generic,
+    Iterable,
+    Literal,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Sequence,
+    Union,
+    cast,
+)
+
+from elastic_transport.client_utils import DEFAULT
+
+from . import wrappers
+from .query import Query
+from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData
+from .utils import _R, AttrDict, DslBase
+
+if TYPE_CHECKING:
+    from elastic_transport.client_utils import DefaultType
+
+    from . import types
+    from .document_base import InstrumentedField
+    from .search_base import SearchBase
+
+
+def A(
+    name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str],
+    filter: Optional[Union[str, "Query"]] = None,
+    **params: Any,
+) -> "Agg[_R]":
+    if filter is not None:
+        if name_or_agg != "filter":
+            raise ValueError(
+                "Aggregation %r doesn't accept positional argument 'filter'."
+                % name_or_agg
+            )
+        params["filter"] = filter
+
+    # {"terms": {"field": "tags"}, "aggs": {...}}
+    if isinstance(name_or_agg, collections.abc.MutableMapping):
+        if params:
+            raise ValueError("A() cannot accept parameters when passing in a dict.")
+        # copy to avoid modifying in-place
+        agg = deepcopy(name_or_agg)
+        # pop out nested aggs
+        aggs = agg.pop("aggs", None)
+        # pop out meta data
+        meta = agg.pop("meta", None)
+        # should be {"terms": {"field": "tags"}}
+        if len(agg) != 1:
+            raise ValueError(
+                'A() can only accept dict with an aggregation ({"terms": {...}}). '
+                "Instead it got (%r)" % name_or_agg
+            )
+        agg_type, params = agg.popitem()
+        if aggs:
+            params = params.copy()
+            params["aggs"] = aggs
+        if meta:
+            params = params.copy()
+            params["meta"] = meta
+        return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params)
+
+    # Terms(...) just return the nested agg
+    elif isinstance(name_or_agg, Agg):
+        if params:
+            raise ValueError(
+                "A() cannot accept parameters when passing in an Agg object."
+            )
+        return name_or_agg
+
+    # "terms", field="tags"
+    return Agg[_R].get_dsl_class(name_or_agg)(**params)
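+
+# Editor's illustrative sketch (not part of the library source): the three
+# equivalent ways ``A()`` accepts an aggregation, assuming hypothetical
+# "tags" and "lines" fields.
+#
+#   a = A("terms", field="tags")                  # name plus parameters
+#   a = A({"terms": {"field": "tags"}})           # raw dict form
+#   a = A(A("terms", field="tags"))               # an existing Agg is returned as-is
+#   a.metric("max_lines", "max", field="lines")   # attach a sub-aggregation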
+
+
+class Agg(DslBase, Generic[_R]):
+    _type_name = "agg"
+    _type_shortcut = staticmethod(A)
+    name = ""
+
+    def __contains__(self, key: str) -> bool:
+        return False
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        if isinstance(d[self.name], dict):
+            n = cast(Dict[str, Any], d[self.name])
+            if "meta" in n:
+                d["meta"] = n.pop("meta")
+        return d
+
+    def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]:
+        return AggResponse[_R](self, search, data)
+
+
+class AggBase(Generic[_R]):
+    aggs: Dict[str, Agg[_R]]
+    _base: Agg[_R]
+    _params: Dict[str, Any]
+    _param_defs: ClassVar[Dict[str, Any]] = {
+        "aggs": {"type": "agg", "hash": True},
+    }
+
+    def __contains__(self, key: str) -> bool:
+        return key in self._params.get("aggs", {})
+
+    def __getitem__(self, agg_name: str) -> Agg[_R]:
+        agg = cast(
+            Agg[_R], self._params.setdefault("aggs", {})[agg_name]
+        )  # propagate KeyError
+
+        # make sure we're not mutating a shared state - whenever accessing a
+        # bucket, return a shallow copy of it to be safe
+        if isinstance(agg, Bucket):
+            agg = A(agg.name, **agg._params)
+            # be sure to store the copy so any modifications to it will affect us
+            self._params["aggs"][agg_name] = agg
+
+        return agg
+
+    def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None:
+        self.aggs[agg_name] = A(agg)
+
+    def __iter__(self) -> Iterable[str]:
+        return iter(self.aggs)
+
+    def _agg(
+        self,
+        bucket: bool,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> Agg[_R]:
+        agg = self[name] = A(agg_type, *args, **params)
+
+        # For chaining - when creating new buckets return them...
+        if bucket:
+            return agg
+        # otherwise return self._base so we can keep chaining
+        else:
+            return self._base
+
+    def metric(
+        self,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> Agg[_R]:
+        return self._agg(False, name, agg_type, *args, **params)
+
+    def bucket(
+        self,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> "Bucket[_R]":
+        return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params))
+
+    def pipeline(
+        self,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> "Pipeline[_R]":
+        return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params))
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return BucketData(self, search, data)  # type: ignore[arg-type]
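+
+    # Editor's illustrative sketch (not part of the library source): how the
+    # return values above enable chaining, assuming hypothetical "tags" and
+    # "lines" fields. ``bucket()`` returns the newly created bucket, so calls
+    # nest; ``metric()`` returns the parent, so siblings attach at one level.
+    #
+    #   s = Search()
+    #   s.aggs.bucket("per_tag", "terms", field="tags") \
+    #       .metric("max_lines", "max", field="lines")
+    #   s.aggs.metric("total_lines", "sum", field="lines")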
+
+
+class Bucket(AggBase[_R], Agg[_R]):
+    def __init__(self, **params: Any):
+        super().__init__(**params)
+        # remember self for chaining
+        self._base = self
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super(AggBase, self).to_dict()
+        if isinstance(d[self.name], dict):
+            n = cast(AttrDict[Any], d[self.name])
+            if "aggs" in n:
+                d["aggs"] = n.pop("aggs")
+        return d
+
+
+class Pipeline(Agg[_R]):
+    pass
+
+
+class AdjacencyMatrix(Bucket[_R]):
+    """
+    A bucket aggregation returning a form of adjacency matrix. The request
+    provides a collection of named filter expressions, similar to the
+    `filters` aggregation. Each bucket in the response represents a non-
+    empty cell in the matrix of intersecting filters.
+
+    :arg filters: Filters used to create buckets. At least one filter is
+        required.
+    :arg separator: Separator used to concatenate filter names. Defaults
+        to &.
+    """
+
+    name = "adjacency_matrix"
+    _param_defs = {
+        "filters": {"type": "query", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *,
+        filters: Union[Mapping[str, Query], "DefaultType"] = DEFAULT,
+        separator: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(filters=filters, separator=separator, **kwargs)
+
+
+class AutoDateHistogram(Bucket[_R]):
+    """
+    A multi-bucket aggregation similar to the date histogram, except
+    instead of providing an interval to use as the width of each bucket, a
+    target number of buckets is provided.
+
+    :arg buckets: The target number of buckets. Defaults to `10` if
+        omitted.
+    :arg field: The field on which to run the aggregation.
+    :arg format: The date format used to format `key_as_string` in the
+        response. If no `format` is specified, the first date format
+        specified in the field mapping is used.
+    :arg minimum_interval: The minimum rounding interval. This can make
+        the collection process more efficient, as the aggregation will not
+        attempt to round at any interval lower than `minimum_interval`.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg offset: Time zone specified as an ISO 8601 UTC offset.
+    :arg params:
+    :arg script:
+    :arg time_zone: Time zone ID.
+    """
+
+    name = "auto_date_histogram"
+
+    def __init__(
+        self,
+        *,
+        buckets: Union[int, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        minimum_interval: Union[
+            Literal["second", "minute", "hour", "day", "month", "year"], "DefaultType"
+        ] = DEFAULT,
+        missing: Any = DEFAULT,
+        offset: Union[str, "DefaultType"] = DEFAULT,
+        params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_zone: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            buckets=buckets,
+            field=field,
+            format=format,
+            minimum_interval=minimum_interval,
+            missing=missing,
+            offset=offset,
+            params=params,
+            script=script,
+            time_zone=time_zone,
+            **kwargs,
+        )
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
+
+
+class Avg(Agg[_R]):
+    """
+    A single-value metrics aggregation that computes the average of
+    numeric values that are extracted from the aggregated documents.
+
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "avg"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, field=field, missing=missing, script=script, **kwargs
+        )
+
+
+class AvgBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which calculates the mean value of a
+    specified metric in a sibling aggregation. The specified metric must
+    be numeric and the sibling aggregation must be a multi-bucket
+    aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "avg_bucket"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class Boxplot(Agg[_R]):
+    """
+    A metrics aggregation that computes a box plot of numeric values
+    extracted from the aggregated documents.
+
+    :arg compression: Limits the maximum number of nodes used by the
+        underlying TDigest algorithm to `20 * compression`, enabling
+        control of memory usage and approximation error.
+    :arg execution_hint: The default implementation of TDigest is
+        optimized for performance, scaling to millions or even billions of
+        sample values while maintaining acceptable accuracy levels (close
+        to 1% relative error for millions of samples in some cases). To
+        use an implementation optimized for accuracy, set this parameter
+        to high_accuracy instead. Defaults to `default` if omitted.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "boxplot"
+
+    def __init__(
+        self,
+        *,
+        compression: Union[float, "DefaultType"] = DEFAULT,
+        execution_hint: Union[
+            Literal["default", "high_accuracy"], "DefaultType"
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            compression=compression,
+            execution_hint=execution_hint,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class BucketScript(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which runs a script which can perform
+    per bucket computations on metrics in the parent multi-bucket
+    aggregation.
+
+    :arg script: The script to run for this aggregation.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "bucket_script"
+
+    def __init__(
+        self,
+        *,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            script=script,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class BucketSelector(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which runs a script to determine whether
+    the current bucket will be retained in the parent multi-bucket
+    aggregation.
+
+    :arg script: The script to run for this aggregation.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "bucket_selector"
+
+    def __init__(
+        self,
+        *,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            script=script,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class BucketSort(Bucket[_R]):
+    """
+    A parent pipeline aggregation which sorts the buckets of its parent
+    multi-bucket aggregation.
+
+    :arg from: Buckets in positions prior to `from` will be truncated.
+    :arg gap_policy: The policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg size: The number of buckets to return. Defaults to all buckets of
+        the parent aggregation.
+    :arg sort: The list of fields to sort on.
+    """
+
+    name = "bucket_sort"
+
+    def __init__(
+        self,
+        *,
+        from_: Union[int, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        sort: Union[
+            Union[Union[str, "InstrumentedField"], "types.SortOptions"],
+            Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]],
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            from_=from_, gap_policy=gap_policy, size=size, sort=sort, **kwargs
+        )
+
+
+class BucketCountKsTest(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which runs a two sample
+    Kolmogorov–Smirnov test ("K-S test") against a provided distribution
+    and the distribution implied by the documents counts in the configured
+    sibling aggregation.
+
+    :arg alternative: A list of string values indicating which K-S test
+        alternative to calculate. The valid values are: "greater", "less",
+        "two_sided". This parameter is key for determining the K-S
+        statistic used when calculating the K-S test. Default value is all
+        possible alternative hypotheses.
+    :arg fractions: A list of doubles indicating the distribution of the
+        samples with which to compare to the `buckets_path` results. In
+        typical usage this is the overall proportion of documents in each
+        bucket, which is compared with the actual document proportions in
+        each bucket from the sibling aggregation counts. The default is to
+        assume that overall documents are uniformly distributed on these
+        buckets, which they would be if one used equal percentiles of a
+        metric to define the bucket end points.
+    :arg sampling_method: Indicates the sampling methodology when
+        calculating the K-S test. Note, this is sampling of the returned
+        values. This determines the cumulative distribution function (CDF)
+        points used comparing the two samples. Default is `upper_tail`,
+        which emphasizes the upper end of the CDF points. Valid options
+        are: `upper_tail`, `uniform`, and `lower_tail`.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "bucket_count_ks_test"
+
+    def __init__(
+        self,
+        *,
+        alternative: Union[Sequence[str], "DefaultType"] = DEFAULT,
+        fractions: Union[Sequence[float], "DefaultType"] = DEFAULT,
+        sampling_method: Union[str, "DefaultType"] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            alternative=alternative,
+            fractions=fractions,
+            sampling_method=sampling_method,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class BucketCorrelation(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which runs a correlation function on
+    the configured sibling multi-bucket aggregation.
+
+    :arg function: (required) The correlation function to execute.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "bucket_correlation"
+
+    def __init__(
+        self,
+        *,
+        function: Union[
+            "types.BucketCorrelationFunction", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(function=function, buckets_path=buckets_path, **kwargs)
+
+
+class Cardinality(Agg[_R]):
+    """
+    A single-value metrics aggregation that calculates an approximate
+    count of distinct values.
+
+    :arg precision_threshold: A unique count below which counts are
+        expected to be close to accurate. This allows you to trade memory
+        for accuracy. Defaults to `3000` if omitted.
+    :arg rehash:
+    :arg execution_hint: Mechanism by which the cardinality aggregation is
+        run.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "cardinality"
+
+    def __init__(
+        self,
+        *,
+        precision_threshold: Union[int, "DefaultType"] = DEFAULT,
+        rehash: Union[bool, "DefaultType"] = DEFAULT,
+        execution_hint: Union[
+            Literal[
+                "global_ordinals",
+                "segment_ordinals",
+                "direct",
+                "save_memory_heuristic",
+                "save_time_heuristic",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            precision_threshold=precision_threshold,
+            rehash=rehash,
+            execution_hint=execution_hint,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class CartesianBounds(Agg[_R]):
+    """
+    A metric aggregation that computes the spatial bounding box containing
+    all values for a Point or Shape field.
+
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "cartesian_bounds"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, missing=missing, script=script, **kwargs)
+
+
+class CartesianCentroid(Agg[_R]):
+    """
+    A metric aggregation that computes the weighted centroid from all
+    coordinate values for point and shape fields.
+
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "cartesian_centroid"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, missing=missing, script=script, **kwargs)
+
+
+class CategorizeText(Bucket[_R]):
+    """
+    A multi-bucket aggregation that groups semi-structured text into
+    buckets.
+
+    :arg field: (required) The semi-structured text field to categorize.
+    :arg max_unique_tokens: The maximum number of unique tokens at any
+        position up to max_matched_tokens. Must be larger than 1. Smaller
+        values use less memory and create fewer categories. Larger values
+        will use more memory and create narrower categories. Max allowed
+        value is 100. Defaults to `50` if omitted.
+    :arg max_matched_tokens: The maximum number of token positions to
+        match on before attempting to merge categories. Larger values will
+        use more memory and create narrower categories. Max allowed value
+        is 100. Defaults to `5` if omitted.
+    :arg similarity_threshold: The minimum percentage of tokens that must
+        match for text to be added to the category bucket. Must be between
+        1 and 100. The larger the value the narrower the categories.
+        Larger values will increase memory usage and create narrower
+        categories. Defaults to `50` if omitted.
+    :arg categorization_filters: This property expects an array of regular
+        expressions. The expressions are used to filter out matching
+        sequences from the categorization field values. You can use this
+        functionality to fine tune the categorization by excluding
+        sequences from consideration when categories are defined. For
+        example, you can exclude SQL statements that appear in your log
+        files. This property cannot be used at the same time as
+        categorization_analyzer. If you only want to define simple regular
+        expression filters that are applied prior to tokenization, setting
+        this property is the easiest method. If you also want to customize
+        the tokenizer or post-tokenization filtering, use the
+        categorization_analyzer property instead and include the filters
+        as pattern_replace character filters.
+    :arg categorization_analyzer: The categorization analyzer specifies
+        how the text is analyzed and tokenized before being categorized.
+        The syntax is very similar to that used to define the analyzer in
+        the analyze API. This property cannot be used at the same time as
+        `categorization_filters`.
+    :arg shard_size: The number of categorization buckets to return from
+        each shard before merging all the results.
+    :arg size: The number of buckets to return. Defaults to `10` if
+        omitted.
+    :arg min_doc_count: The minimum number of documents in a bucket to be
+        returned to the results.
+    :arg shard_min_doc_count: The minimum number of documents in a bucket
+        to be returned from the shard before merging.
+    """
+
+    name = "categorize_text"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        max_unique_tokens: Union[int, "DefaultType"] = DEFAULT,
+        max_matched_tokens: Union[int, "DefaultType"] = DEFAULT,
+        similarity_threshold: Union[int, "DefaultType"] = DEFAULT,
+        categorization_filters: Union[Sequence[str], "DefaultType"] = DEFAULT,
+        categorization_analyzer: Union[
+            str, "types.CustomCategorizeTextAnalyzer", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            max_unique_tokens=max_unique_tokens,
+            max_matched_tokens=max_matched_tokens,
+            similarity_threshold=similarity_threshold,
+            categorization_filters=categorization_filters,
+            categorization_analyzer=categorization_analyzer,
+            shard_size=shard_size,
+            size=size,
+            min_doc_count=min_doc_count,
+            shard_min_doc_count=shard_min_doc_count,
+            **kwargs,
+        )
+
+
+class ChangePoint(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation that detects spikes, dips, and change
+    points in a metric. Given a distribution of values provided by the
+    sibling multi-bucket aggregation, this aggregation indicates the bucket
+    of any spike or dip and/or the bucket at which the largest change in
+    the distribution of values occurs, if they are statistically
+    significant. There must be at least 22 bucketed values. Fewer than
+    1,000 is preferred.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "change_point"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class Children(Bucket[_R]):
+    """
+    A single bucket aggregation that selects child documents that have the
+    specified type, as defined in a `join` field.
+
+    :arg type: The child type that should be selected.
+    """
+
+    name = "children"
+
+    def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any):
+        super().__init__(type=type, **kwargs)
+
+
+class Composite(Bucket[_R]):
+    """
+    A multi-bucket aggregation that creates composite buckets from
+    different sources. Unlike the other multi-bucket aggregations, you can
+    use the `composite` aggregation to paginate *all* buckets from a
+    multi-level aggregation efficiently.
+
+    :arg after: When paginating, use the `after_key` value returned in the
+        previous response to retrieve the next page.
+    :arg size: The number of composite buckets that should be returned.
+        Defaults to `10` if omitted.
+    :arg sources: The value sources used to build composite buckets. Keys
+        are returned in the order of the `sources` definition.
+    """
+
+    name = "composite"
+
+    def __init__(
+        self,
+        *,
+        after: Union[
+            Mapping[
+                Union[str, "InstrumentedField"], Union[int, float, str, bool, None]
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        sources: Union[Sequence[Mapping[str, Agg[_R]]], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(after=after, size=size, sources=sources, **kwargs)
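+
+    # Editor's illustrative sketch (not part of the library source): paginating
+    # buckets with a composite aggregation, assuming a hypothetical "tags"
+    # field. The next page is requested by passing the previous response's
+    # ``after_key`` back as ``after``.
+    #
+    #   s = Search(index="blogs").extra(size=0)
+    #   s.aggs.bucket(
+    #       "tag_pages", "composite", size=100,
+    #       sources=[{"tag": A("terms", field="tags")}],
+    #   )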
+
+
+class CumulativeCardinality(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which calculates the cumulative
+    cardinality in a parent `histogram` or `date_histogram` aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "cumulative_cardinality"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class CumulativeSum(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which calculates the cumulative sum of a
+    specified metric in a parent `histogram` or `date_histogram`
+    aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "cumulative_sum"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class DateHistogram(Bucket[_R]):
+    """
+    A multi-bucket values source based aggregation that can be applied on
+    date values or date range values extracted from the documents. It
+    dynamically builds fixed size (interval) buckets over the values.
+
+    :arg calendar_interval: Calendar-aware interval. Can be specified
+        using the unit name, such as `month`, or as a single unit
+        quantity, such as `1M`.
+    :arg extended_bounds: Enables extending the bounds of the histogram
+        beyond the data itself.
+    :arg hard_bounds: Limits the histogram to specified bounds.
+    :arg field: The date field whose values are used to build a histogram.
+    :arg fixed_interval: Fixed interval: a fixed number of SI units that
+        never deviates, regardless of where it falls on the calendar.
+    :arg format: The date format used to format `key_as_string` in the
+        response. If no `format` is specified, the first date format
+        specified in the field mapping is used.
+    :arg interval:
+    :arg min_doc_count: Only returns buckets that have `min_doc_count`
+        number of documents. By default, all buckets between the first
+        bucket that matches documents and the last one are returned.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg offset: Changes the start value of each bucket by the specified
+        positive (`+`) or negative offset (`-`) duration.
+    :arg order: The sort order of the returned buckets.
+    :arg params:
+    :arg script:
+    :arg time_zone: Time zone used for bucketing and rounding. Defaults to
+        Coordinated Universal Time (UTC).
+    :arg keyed: Set to `true` to associate a unique string key with each
+        bucket and return the ranges as a hash rather than an array.
+    """
+
+    name = "date_histogram"
+
+    def __init__(
+        self,
+        *,
+        calendar_interval: Union[
+            Literal[
+                "second", "minute", "hour", "day", "week", "month", "quarter", "year"
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        extended_bounds: Union[
+            "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        hard_bounds: Union[
+            "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        fixed_interval: Any = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        interval: Any = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        missing: Any = DEFAULT,
+        offset: Any = DEFAULT,
+        order: Union[
+            Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+            Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+            "DefaultType",
+        ] = DEFAULT,
+        params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_zone: Union[str, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            calendar_interval=calendar_interval,
+            extended_bounds=extended_bounds,
+            hard_bounds=hard_bounds,
+            field=field,
+            fixed_interval=fixed_interval,
+            format=format,
+            interval=interval,
+            min_doc_count=min_doc_count,
+            missing=missing,
+            offset=offset,
+            order=order,
+            params=params,
+            script=script,
+            time_zone=time_zone,
+            keyed=keyed,
+            **kwargs,
+        )
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
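+
+    # Editor's illustrative sketch (not part of the library source): a monthly
+    # histogram over a hypothetical "published" date field.
+    #
+    #   s = Search(index="blogs")
+    #   s.aggs.bucket(
+    #       "articles_per_month", "date_histogram",
+    #       field="published", calendar_interval="month",
+    #   )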
+
+
+class DateRange(Bucket[_R]):
+    """
+    A multi-bucket value source based aggregation that enables the user to
+    define a set of date ranges - each representing a bucket.
+
+    :arg field: The date field whose values are used to build ranges.
+    :arg format: The date format used to format `from` and `to` in the
+        response.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg ranges: Array of date ranges.
+    :arg time_zone: Time zone used to convert dates from another time zone
+        to UTC.
+    :arg keyed: Set to `true` to associate a unique string key with each
+        bucket and return the ranges as a hash rather than an array.
+    """
+
+    name = "date_range"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        ranges: Union[
+            Sequence["wrappers.AggregationRange"],
+            Sequence[Dict[str, Any]],
+            "DefaultType",
+        ] = DEFAULT,
+        time_zone: Union[str, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            format=format,
+            missing=missing,
+            ranges=ranges,
+            time_zone=time_zone,
+            keyed=keyed,
+            **kwargs,
+        )
+
+
+class Derivative(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which calculates the derivative of a
+    specified metric in a parent `histogram` or `date_histogram`
+    aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "derivative"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class DiversifiedSampler(Bucket[_R]):
+    """
+    A filtering aggregation used to limit any sub aggregations' processing
+    to a sample of the top-scoring documents. Similar to the `sampler`
+    aggregation, but adds the ability to limit the number of matches that
+    share a common value.
+
+    :arg execution_hint: The type of value used for de-duplication.
+        Defaults to `global_ordinals` if omitted.
+    :arg max_docs_per_value: Limits how many documents are permitted per
+        choice of de-duplicating value. Defaults to `1` if omitted.
+    :arg script:
+    :arg shard_size: Limits how many top-scoring documents are collected
+        in the sample processed on each shard. Defaults to `100` if
+        omitted.
+    :arg field: The field used to provide values used for de-duplication.
+    """
+
+    name = "diversified_sampler"
+
+    def __init__(
+        self,
+        *,
+        execution_hint: Union[
+            Literal["map", "global_ordinals", "bytes_hash"], "DefaultType"
+        ] = DEFAULT,
+        max_docs_per_value: Union[int, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            execution_hint=execution_hint,
+            max_docs_per_value=max_docs_per_value,
+            script=script,
+            shard_size=shard_size,
+            field=field,
+            **kwargs,
+        )
+
+
+class ExtendedStats(Agg[_R]):
+    """
+    A multi-value metrics aggregation that computes stats over numeric
+    values extracted from the aggregated documents.
+
+    :arg sigma: The number of standard deviations above/below the mean to
+        display.
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "extended_stats"
+
+    def __init__(
+        self,
+        *,
+        sigma: Union[float, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            sigma=sigma,
+            format=format,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class ExtendedStatsBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which calculates a variety of stats
+    across all buckets of a specified metric in a sibling aggregation.
+
+    :arg sigma: The number of standard deviations above/below the mean to
+        display.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "extended_stats_bucket"
+
+    def __init__(
+        self,
+        *,
+        sigma: Union[float, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            sigma=sigma,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class FrequentItemSets(Agg[_R]):
+    """
+    A bucket aggregation which finds frequent item sets, a form of
+    association rules mining that identifies items that often occur
+    together.
+
+    :arg fields: (required) Fields to analyze.
+    :arg minimum_set_size: The minimum size of one item set. Defaults to
+        `1` if omitted.
+    :arg minimum_support: The minimum support of one item set. Defaults to
+        `0.1` if omitted.
+    :arg size: The number of top item sets to return. Defaults to `10` if
+        omitted.
+    :arg filter: Query that filters documents from analysis.
+    """
+
+    name = "frequent_item_sets"
+    _param_defs = {
+        "filter": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        fields: Union[
+            Sequence["types.FrequentItemSetsField"],
+            Sequence[Dict[str, Any]],
+            "DefaultType",
+        ] = DEFAULT,
+        minimum_set_size: Union[int, "DefaultType"] = DEFAULT,
+        minimum_support: Union[float, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        filter: Union[Query, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            fields=fields,
+            minimum_set_size=minimum_set_size,
+            minimum_support=minimum_support,
+            size=size,
+            filter=filter,
+            **kwargs,
+        )
+
+
+class Filter(Bucket[_R]):
+    """
+    A single bucket aggregation that narrows the set of documents to those
+    that match a query.
+
+    :arg filter: A single bucket aggregation that narrows the set of
+        documents to those that match a query.
+    """
+
+    name = "filter"
+    _param_defs = {
+        "filter": {"type": "query"},
+        "aggs": {"type": "agg", "hash": True},
+    }
+
+    def __init__(self, filter: Union[Query, "DefaultType"] = DEFAULT, **kwargs: Any):
+        super().__init__(filter=filter, **kwargs)
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        if isinstance(d[self.name], dict):
+            n = cast(AttrDict[Any], d[self.name])
+            n.update(n.pop("filter", {}))
+        return d
+
+
+class Filters(Bucket[_R]):
+    """
+    A multi-bucket aggregation where each bucket contains the documents
+    that match a query.
+
+    :arg filters: Collection of queries from which to build buckets.
+    :arg other_bucket: Set to `true` to add a bucket to the response which
+        will contain all documents that do not match any of the given
+        filters.
+    :arg other_bucket_key: The key with which the other bucket is
+        returned. Defaults to `_other_` if omitted.
+    :arg keyed: By default, the named filters aggregation returns the
+        buckets as an object. Set to `false` to return the buckets as an
+        array of objects. Defaults to `True` if omitted.
+    """
+
+    name = "filters"
+    _param_defs = {
+        "filters": {"type": "query", "hash": True},
+        "aggs": {"type": "agg", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *,
+        filters: Union[Dict[str, Query], "DefaultType"] = DEFAULT,
+        other_bucket: Union[bool, "DefaultType"] = DEFAULT,
+        other_bucket_key: Union[str, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            filters=filters,
+            other_bucket=other_bucket,
+            other_bucket_key=other_bucket_key,
+            keyed=keyed,
+            **kwargs,
+        )
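+
+    # Editor's illustrative sketch (not part of the library source): one bucket
+    # per named query, assuming a hypothetical "level" keyword field and the
+    # ``Q`` query shortcut imported from the DSL package.
+    #
+    #   s = Search(index="logs")
+    #   s.aggs.bucket(
+    #       "messages", "filters",
+    #       filters={"errors": Q("term", level="error"),
+    #                "warnings": Q("term", level="warning")},
+    #   )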
+
+
+class GeoBounds(Agg[_R]):
+    """
+    A metric aggregation that computes the geographic bounding box
+    containing all values for a Geopoint or Geoshape field.
+
+    :arg wrap_longitude: Specifies whether the bounding box should be
+        allowed to overlap the international date line. Defaults to `True`
+        if omitted.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "geo_bounds"
+
+    def __init__(
+        self,
+        *,
+        wrap_longitude: Union[bool, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            wrap_longitude=wrap_longitude,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class GeoCentroid(Agg[_R]):
+    """
+    A metric aggregation that computes the weighted centroid from all
+    coordinate values for geo fields.
+
+    :arg count:
+    :arg location:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "geo_centroid"
+
+    def __init__(
+        self,
+        *,
+        count: Union[int, "DefaultType"] = DEFAULT,
+        location: Union[
+            "types.LatLonGeoLocation",
+            "types.GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            count=count,
+            location=location,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class GeoDistance(Bucket[_R]):
+    """
+    A multi-bucket aggregation that works on `geo_point` fields. Evaluates
+    the distance of each document value from an origin point and
+    determines the buckets it belongs to, based on ranges defined in the
+    request.
+
+    :arg distance_type: The distance calculation type. Defaults to `arc`
+        if omitted.
+    :arg field: A field of type `geo_point` used to evaluate the distance.
+    :arg origin: The origin used to evaluate the distance.
+    :arg ranges: An array of ranges used to bucket documents.
+    :arg unit: The distance unit. Defaults to `m` if omitted.
+    """
+
+    name = "geo_distance"
+
+    def __init__(
+        self,
+        *,
+        distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        origin: Union[
+            "types.LatLonGeoLocation",
+            "types.GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        ranges: Union[
+            Sequence["wrappers.AggregationRange"],
+            Sequence[Dict[str, Any]],
+            "DefaultType",
+        ] = DEFAULT,
+        unit: Union[
+            Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            distance_type=distance_type,
+            field=field,
+            origin=origin,
+            ranges=ranges,
+            unit=unit,
+            **kwargs,
+        )
+
+
+class GeohashGrid(Bucket[_R]):
+    """
+    A multi-bucket aggregation that groups `geo_point` and `geo_shape`
+    values into buckets that represent a grid. Each cell is labeled using
+    a geohash which is of user-definable precision.
+
+    :arg bounds: The bounding box to filter the points in each bucket.
+    :arg field: Field containing indexed `geo_point` or `geo_shape`
+        values. If the field contains an array, `geohash_grid` aggregates
+        all array values.
+    :arg precision: The string length of the geohashes used to define
+        cells/buckets in the results. Defaults to `5` if omitted.
+    :arg shard_size: Allows for more accurate counting of the top cells
+        returned in the final result of the aggregation. Defaults to
+        returning `max(10,(size x number-of-shards))` buckets from each
+        shard.
+    :arg size: The maximum number of geohash buckets to return. Defaults
+        to `10000` if omitted.
+    """
+
+    name = "geohash_grid"
+
+    def __init__(
+        self,
+        *,
+        bounds: Union[
+            "types.CoordsGeoBounds",
+            "types.TopLeftBottomRightGeoBounds",
+            "types.TopRightBottomLeftGeoBounds",
+            "types.WktGeoBounds",
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        precision: Union[float, str, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            bounds=bounds,
+            field=field,
+            precision=precision,
+            shard_size=shard_size,
+            size=size,
+            **kwargs,
+        )
+
+
+class GeoLine(Agg[_R]):
+    """
+    Aggregates all `geo_point` values within a bucket into a `LineString`
+    ordered by the chosen sort field.
+
+    :arg point: (required) The name of the geo_point field.
+    :arg sort: (required) The name of the numeric field to use as the sort
+        key for ordering the points. When the `geo_line` aggregation is
+        nested inside a `time_series` aggregation, this field defaults to
+        `@timestamp`, and any other value will result in an error.
+    :arg include_sort: When `true`, returns an additional array of the
+        sort values in the feature properties.
+    :arg sort_order: The order in which the line is sorted (ascending or
+        descending). Defaults to `asc` if omitted.
+    :arg size: The maximum length of the line represented in the
+        aggregation. Valid sizes are between 1 and 10000. Defaults to
+        `10000` if omitted.
+    """
+
+    name = "geo_line"
+
+    def __init__(
+        self,
+        *,
+        point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT,
+        sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT,
+        include_sort: Union[bool, "DefaultType"] = DEFAULT,
+        sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            point=point,
+            sort=sort,
+            include_sort=include_sort,
+            sort_order=sort_order,
+            size=size,
+            **kwargs,
+        )
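+
+# Illustrative usage (editorial sketch; assumes a hypothetical "tracks" index
+# with a `geo_point` field "location" sorted by a `@timestamp` field):
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import GeoLine
+#
+#     s = Search(index="tracks")
+#     s.aggs.metric(
+#         "route",
+#         GeoLine(
+#             point={"field": "location"},
+#             sort={"field": "@timestamp"},
+#             sort_order="asc",
+#         ),
+#     )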
+
+
+class GeotileGrid(Bucket[_R]):
+    """
+    A multi-bucket aggregation that groups `geo_point` and `geo_shape`
+    values into buckets that represent a grid. Each cell corresponds to a
+    map tile as used by many online map sites.
+
+    :arg field: Field containing indexed `geo_point` or `geo_shape`
+        values. If the field contains an array, `geotile_grid` aggregates
+        all array values.
+    :arg precision: Integer zoom of the key used to define cells/buckets
+        in the results. Values outside of the range [0,29] will be
+        rejected. Defaults to `7` if omitted.
+    :arg shard_size: Allows for more accurate counting of the top cells
+        returned in the final result of the aggregation. Defaults to
+        returning `max(10,(size x number-of-shards))` buckets from each
+        shard.
+    :arg size: The maximum number of buckets to return. Defaults to
+        `10000` if omitted.
+    :arg bounds: A bounding box to filter the geo-points or geo-shapes in
+        each bucket.
+    """
+
+    name = "geotile_grid"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        precision: Union[float, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        bounds: Union[
+            "types.CoordsGeoBounds",
+            "types.TopLeftBottomRightGeoBounds",
+            "types.TopRightBottomLeftGeoBounds",
+            "types.WktGeoBounds",
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            precision=precision,
+            shard_size=shard_size,
+            size=size,
+            bounds=bounds,
+            **kwargs,
+        )
+
+
+class GeohexGrid(Bucket[_R]):
+    """
+    A multi-bucket aggregation that groups `geo_point` and `geo_shape`
+    values into buckets that represent a grid. Each cell corresponds to an
+    H3 cell index and is labeled using the H3Index representation.
+
+    :arg field: (required) Field containing indexed `geo_point` or
+        `geo_shape` values. If the field contains an array, `geohex_grid`
+        aggregates all array values.
+    :arg precision: Integer zoom of the key used to define cells or
+        buckets in the results. Value should be between 0 and 15. Defaults to
+        `6` if omitted.
+    :arg bounds: Bounding box used to filter the geo-points in each
+        bucket.
+    :arg size: Maximum number of buckets to return. Defaults to `10000` if
+        omitted.
+    :arg shard_size: Number of buckets returned from each shard.
+    """
+
+    name = "geohex_grid"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        precision: Union[int, "DefaultType"] = DEFAULT,
+        bounds: Union[
+            "types.CoordsGeoBounds",
+            "types.TopLeftBottomRightGeoBounds",
+            "types.TopRightBottomLeftGeoBounds",
+            "types.WktGeoBounds",
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            precision=precision,
+            bounds=bounds,
+            size=size,
+            shard_size=shard_size,
+            **kwargs,
+        )
+
+
+class Global(Bucket[_R]):
+    """
+    Defines a single bucket of all the documents within the search
+    execution context. This context is defined by the indices and the
+    document types you’re searching on, but is not influenced by the
+    search query itself.
+    """
+
+    name = "global"
+
+    def __init__(self, **kwargs: Any):
+        super().__init__(**kwargs)
+
+
+class Histogram(Bucket[_R]):
+    """
+    A multi-bucket values source based aggregation that can be applied on
+    numeric values or numeric range values extracted from the documents.
+    It dynamically builds fixed size (interval) buckets over the values.
+
+    :arg extended_bounds: Enables extending the bounds of the histogram
+        beyond the data itself.
+    :arg hard_bounds: Limits the range of buckets in the histogram. It is
+        particularly useful in the case of open data ranges that can
+        result in a very large number of buckets.
+    :arg field: The name of the field to aggregate on.
+    :arg interval: The interval for the buckets. Must be a positive
+        decimal.
+    :arg min_doc_count: Only returns buckets that have `min_doc_count`
+        number of documents. By default, the response will fill gaps in
+        the histogram with empty buckets.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg offset: By default, the bucket keys start with 0 and then
+        continue in even spaced steps of `interval`. The bucket boundaries
+        can be shifted by using the `offset` option.
+    :arg order: The sort order of the returned buckets. By default, the
+        returned buckets are sorted by their key ascending.
+    :arg script:
+    :arg format:
+    :arg keyed: If `true`, returns buckets as a hash instead of an array,
+        keyed by the bucket keys.
+    """
+
+    name = "histogram"
+
+    def __init__(
+        self,
+        *,
+        extended_bounds: Union[
+            "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        hard_bounds: Union[
+            "types.ExtendedBounds", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        interval: Union[float, "DefaultType"] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        missing: Union[float, "DefaultType"] = DEFAULT,
+        offset: Union[float, "DefaultType"] = DEFAULT,
+        order: Union[
+            Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+            Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+            "DefaultType",
+        ] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            extended_bounds=extended_bounds,
+            hard_bounds=hard_bounds,
+            field=field,
+            interval=interval,
+            min_doc_count=min_doc_count,
+            missing=missing,
+            offset=offset,
+            order=order,
+            script=script,
+            format=format,
+            keyed=keyed,
+            **kwargs,
+        )
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
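+
+# Illustrative usage (editorial sketch; "sales" and "price" are hypothetical).
+# Because `result()` wraps the response in `FieldBucketData`, the buckets can
+# be iterated directly on the response object:
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import Histogram
+#
+#     s = Search(index="sales")
+#     s.aggs.bucket("prices", Histogram(field="price", interval=50, min_doc_count=1))
+#     response = s.execute()
+#     for b in response.aggregations.prices.buckets:
+#         print(b.key, b.doc_count)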
+
+
+class IPRange(Bucket[_R]):
+    """
+    A multi-bucket value source based aggregation that enables the user to
+    define a set of IP ranges - each representing a bucket.
+
+    :arg field: The IP field whose values are used to build ranges.
+    :arg ranges: Array of IP ranges.
+    """
+
+    name = "ip_range"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        ranges: Union[
+            Sequence["types.IpRangeAggregationRange"],
+            Sequence[Dict[str, Any]],
+            "DefaultType",
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, ranges=ranges, **kwargs)
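+
+# Illustrative usage (editorial sketch; the index and field are hypothetical):
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import IPRange
+#
+#     s = Search(index="access-logs")
+#     s.aggs.bucket(
+#         "client_ips",
+#         IPRange(
+#             field="client_ip",
+#             ranges=[{"to": "10.0.0.128"}, {"from": "10.0.0.128"}],
+#         ),
+#     )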
+
+
+class IPPrefix(Bucket[_R]):
+    """
+    A bucket aggregation that groups documents based on the network or
+    sub-network of an IP address.
+
+    :arg field: (required) The IP address field to aggregate on. The
+        field mapping type must be `ip`.
+    :arg prefix_length: (required) Length of the network prefix. For IPv4
+        addresses the accepted range is [0, 32]. For IPv6 addresses the
+        accepted range is [0, 128].
+    :arg is_ipv6: Defines whether the prefix applies to IPv6 addresses.
+    :arg append_prefix_length: Defines whether the prefix length is
+        appended to IP address keys in the response.
+    :arg keyed: Defines whether buckets are returned as a hash rather than
+        an array in the response.
+    :arg min_doc_count: Minimum number of documents in a bucket for it to
+        be included in the response. Defaults to `1` if omitted.
+    """
+
+    name = "ip_prefix"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        prefix_length: Union[int, "DefaultType"] = DEFAULT,
+        is_ipv6: Union[bool, "DefaultType"] = DEFAULT,
+        append_prefix_length: Union[bool, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            prefix_length=prefix_length,
+            is_ipv6=is_ipv6,
+            append_prefix_length=append_prefix_length,
+            keyed=keyed,
+            min_doc_count=min_doc_count,
+            **kwargs,
+        )
+
+
+class Inference(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which loads a pre-trained model and
+    performs inference on the collated result fields from the parent
+    bucket aggregation.
+
+    :arg model_id: (required) The ID or alias for the trained model.
+    :arg inference_config: Contains the inference type and its options.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "inference"
+
+    def __init__(
+        self,
+        *,
+        model_id: Union[str, "DefaultType"] = DEFAULT,
+        inference_config: Union[
+            "types.InferenceConfigContainer", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model_id=model_id,
+            inference_config=inference_config,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class Line(Agg[_R]):
+    """
+    :arg point: (required) The name of the geo_point field.
+    :arg sort: (required) The name of the numeric field to use as the sort
+        key for ordering the points. When the `geo_line` aggregation is
+        nested inside a `time_series` aggregation, this field defaults to
+        `@timestamp`, and any other value will result in an error.
+    :arg include_sort: When `true`, returns an additional array of the
+        sort values in the feature properties.
+    :arg sort_order: The order in which the line is sorted (ascending or
+        descending). Defaults to `asc` if omitted.
+    :arg size: The maximum length of the line represented in the
+        aggregation. Valid sizes are between 1 and 10000. Defaults to
+        `10000` if omitted.
+    """
+
+    name = "line"
+
+    def __init__(
+        self,
+        *,
+        point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT,
+        sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT,
+        include_sort: Union[bool, "DefaultType"] = DEFAULT,
+        sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            point=point,
+            sort=sort,
+            include_sort=include_sort,
+            sort_order=sort_order,
+            size=size,
+            **kwargs,
+        )
+
+
+class MatrixStats(Agg[_R]):
+    """
+    A numeric aggregation that computes the following statistics over a
+    set of document fields: `count`, `mean`, `variance`, `skewness`,
+    `kurtosis`, `covariance`, and `correlation`.
+
+    :arg mode: Array value the aggregation will use for array or multi-
+        valued fields. Defaults to `avg` if omitted.
+    :arg fields: An array of fields for computing the statistics.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    """
+
+    name = "matrix_stats"
+
+    def __init__(
+        self,
+        *,
+        mode: Union[
+            Literal["min", "max", "sum", "avg", "median"], "DefaultType"
+        ] = DEFAULT,
+        fields: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        missing: Union[
+            Mapping[Union[str, "InstrumentedField"], float], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(mode=mode, fields=fields, missing=missing, **kwargs)
+
+
+class Max(Agg[_R]):
+    """
+    A single-value metrics aggregation that returns the maximum value
+    among the numeric values extracted from the aggregated documents.
+
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "max"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, field=field, missing=missing, script=script, **kwargs
+        )
+
+
+class MaxBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which identifies the bucket(s) with the
+    maximum value of a specified metric in a sibling aggregation and
+    outputs both the value and the key(s) of the bucket(s).
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "max_bucket"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
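+
+# Illustrative usage (editorial sketch). A sibling pipeline is attached at the
+# same level as the bucket aggregation named in `buckets_path`; the index and
+# field names are hypothetical:
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import Histogram, Max, MaxBucket
+#
+#     s = Search(index="sales")
+#     s.aggs.bucket("prices", Histogram(field="price", interval=50)) \
+#           .metric("max_price", Max(field="price"))
+#     s.aggs.pipeline("best_bucket", MaxBucket(buckets_path="prices>max_price"))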
+
+
+class MedianAbsoluteDeviation(Agg[_R]):
+    """
+    A single-value aggregation that approximates the median absolute
+    deviation of its search results.
+
+    :arg compression: Limits the maximum number of nodes used by the
+        underlying TDigest algorithm to `20 * compression`, enabling
+        control of memory usage and approximation error. Defaults to
+        `1000` if omitted.
+    :arg execution_hint: The default implementation of TDigest is
+        optimized for performance, scaling to millions or even billions of
+        sample values while maintaining acceptable accuracy levels (close
+        to 1% relative error for millions of samples in some cases). To
+        use an implementation optimized for accuracy, set this parameter
+        to high_accuracy instead. Defaults to `default` if omitted.
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "median_absolute_deviation"
+
+    def __init__(
+        self,
+        *,
+        compression: Union[float, "DefaultType"] = DEFAULT,
+        execution_hint: Union[
+            Literal["default", "high_accuracy"], "DefaultType"
+        ] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            compression=compression,
+            execution_hint=execution_hint,
+            format=format,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class Min(Agg[_R]):
+    """
+    A single-value metrics aggregation that returns the minimum value
+    among numeric values extracted from the aggregated documents.
+
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "min"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, field=field, missing=missing, script=script, **kwargs
+        )
+
+
+class MinBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which identifies the bucket(s) with the
+    minimum value of a specified metric in a sibling aggregation and
+    outputs both the value and the key(s) of the bucket(s).
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "min_bucket"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class Missing(Bucket[_R]):
+    """
+    A field data based single bucket aggregation that creates a bucket of
+    all documents in the current document set context that are missing a
+    field value (effectively, missing a field or having the configured
+    NULL value set).
+
+    :arg field: The name of the field.
+    :arg missing:
+    """
+
+    name = "missing"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, missing=missing, **kwargs)
+
+
+class MovingAvg(Pipeline[_R]):
+    """ """
+
+    name = "moving_avg"
+
+    def __init__(self, **kwargs: Any):
+        super().__init__(**kwargs)
+
+
+class LinearMovingAverageAggregation(MovingAvg[_R]):
+    """
+    :arg model: (required)
+    :arg settings: (required)
+    :arg minimize:
+    :arg predict:
+    :arg window:
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Any = DEFAULT,
+        settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+        minimize: Union[bool, "DefaultType"] = DEFAULT,
+        predict: Union[int, "DefaultType"] = DEFAULT,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model=model,
+            settings=settings,
+            minimize=minimize,
+            predict=predict,
+            window=window,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class SimpleMovingAverageAggregation(MovingAvg[_R]):
+    """
+    :arg model: (required)
+    :arg settings: (required)
+    :arg minimize:
+    :arg predict:
+    :arg window:
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Any = DEFAULT,
+        settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+        minimize: Union[bool, "DefaultType"] = DEFAULT,
+        predict: Union[int, "DefaultType"] = DEFAULT,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model=model,
+            settings=settings,
+            minimize=minimize,
+            predict=predict,
+            window=window,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class EwmaMovingAverageAggregation(MovingAvg[_R]):
+    """
+    :arg model: (required)
+    :arg settings: (required)
+    :arg minimize:
+    :arg predict:
+    :arg window:
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Any = DEFAULT,
+        settings: Union[
+            "types.EwmaModelSettings", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        minimize: Union[bool, "DefaultType"] = DEFAULT,
+        predict: Union[int, "DefaultType"] = DEFAULT,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model=model,
+            settings=settings,
+            minimize=minimize,
+            predict=predict,
+            window=window,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class HoltMovingAverageAggregation(MovingAvg[_R]):
+    """
+    :arg model: (required)
+    :arg settings: (required)
+    :arg minimize:
+    :arg predict:
+    :arg window:
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Any = DEFAULT,
+        settings: Union[
+            "types.HoltLinearModelSettings", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        minimize: Union[bool, "DefaultType"] = DEFAULT,
+        predict: Union[int, "DefaultType"] = DEFAULT,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model=model,
+            settings=settings,
+            minimize=minimize,
+            predict=predict,
+            window=window,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class HoltWintersMovingAverageAggregation(MovingAvg[_R]):
+    """
+    :arg model: (required)
+    :arg settings: (required)
+    :arg minimize:
+    :arg predict:
+    :arg window:
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Any = DEFAULT,
+        settings: Union[
+            "types.HoltWintersModelSettings", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        minimize: Union[bool, "DefaultType"] = DEFAULT,
+        predict: Union[int, "DefaultType"] = DEFAULT,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            model=model,
+            settings=settings,
+            minimize=minimize,
+            predict=predict,
+            window=window,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class MovingPercentiles(Pipeline[_R]):
+    """
+    Given an ordered series of percentiles, "slides" a window across those
+    percentiles and computes cumulative percentiles.
+
+    :arg window: The size of window to "slide" across the histogram.
+    :arg shift: By default, the window consists of the last n values
+        excluding the current bucket. Increasing `shift` by 1 moves the
+        starting window position by 1 to the right.
+    :arg keyed:
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "moving_percentiles"
+
+    def __init__(
+        self,
+        *,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        shift: Union[int, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            window=window,
+            shift=shift,
+            keyed=keyed,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class MovingFn(Pipeline[_R]):
+    """
+    Given an ordered series of data, "slides" a window across the data and
+    runs a custom script on each window of data. For convenience, a number
+    of common functions are predefined such as `min`, `max`, and moving
+    averages.
+
+    :arg script: The script that should be executed on each window of
+        data.
+    :arg shift: By default, the window consists of the last n values
+        excluding the current bucket. Increasing `shift` by 1 moves the
+        starting window position by 1 to the right.
+    :arg window: The size of window to "slide" across the histogram.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "moving_fn"
+
+    def __init__(
+        self,
+        *,
+        script: Union[str, "DefaultType"] = DEFAULT,
+        shift: Union[int, "DefaultType"] = DEFAULT,
+        window: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            script=script,
+            shift=shift,
+            window=window,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
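+
+# Illustrative usage (editorial sketch). As a parent pipeline, `moving_fn` is
+# nested inside a histogram-type aggregation and reads a sibling metric via
+# `buckets_path`; `MovingFunctions.max` is one of the predefined helpers
+# mentioned above (index and field names are hypothetical):
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import Histogram, Max, MovingFn
+#
+#     s = Search(index="sales")
+#     prices = s.aggs.bucket("prices", Histogram(field="price", interval=10))
+#     prices.metric("max_price", Max(field="price"))
+#     prices.pipeline(
+#         "moving_max",
+#         MovingFn(buckets_path="max_price", window=5,
+#                  script="MovingFunctions.max(values)"),
+#     )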
+
+
+class MultiTerms(Bucket[_R]):
+    """
+    A multi-bucket value source based aggregation where buckets are
+    dynamically built - one per unique set of values.
+
+    :arg terms: (required) The field from which to generate sets of terms.
+    :arg collect_mode: Specifies the strategy for data collection.
+        Defaults to `breadth_first` if omitted.
+    :arg order: Specifies the sort order of the buckets. Defaults to
+        sorting by descending document count.
+    :arg min_doc_count: The minimum number of documents in a bucket for it
+        to be returned. Defaults to `1` if omitted.
+    :arg shard_min_doc_count: The minimum number of documents in a bucket
+        on each shard for it to be returned. Defaults to `1` if omitted.
+    :arg shard_size: The number of candidate terms produced by each shard.
+        By default, `shard_size` will be automatically estimated based on
+        the number of shards and the `size` parameter.
+    :arg show_term_doc_count_error: Calculates the doc count error on per
+        term basis.
+    :arg size: The number of term buckets that should be returned out of the
+        overall terms list. Defaults to `10` if omitted.
+    """
+
+    name = "multi_terms"
+
+    def __init__(
+        self,
+        *,
+        terms: Union[
+            Sequence["types.MultiTermLookup"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        collect_mode: Union[
+            Literal["depth_first", "breadth_first"], "DefaultType"
+        ] = DEFAULT,
+        order: Union[
+            Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+            Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+            "DefaultType",
+        ] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            terms=terms,
+            collect_mode=collect_mode,
+            order=order,
+            min_doc_count=min_doc_count,
+            shard_min_doc_count=shard_min_doc_count,
+            shard_size=shard_size,
+            show_term_doc_count_error=show_term_doc_count_error,
+            size=size,
+            **kwargs,
+        )
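+
+# Illustrative usage (editorial sketch; the index and fields are hypothetical):
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import MultiTerms
+#
+#     s = Search(index="products")
+#     s.aggs.bucket(
+#         "genre_and_product",
+#         MultiTerms(terms=[{"field": "genre"}, {"field": "product"}], size=10),
+#     )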
+
+
+class Nested(Bucket[_R]):
+    """
+    A special single bucket aggregation that enables aggregating nested
+    documents.
+
+    :arg path: The path to the field of type `nested`.
+    """
+
+    name = "nested"
+
+    def __init__(
+        self,
+        path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(path=path, **kwargs)
+
+
+class Normalize(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which calculates the specific
+    normalized/rescaled value for a specific bucket value.
+
+    :arg method: The specific method to apply.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "normalize"
+
+    def __init__(
+        self,
+        *,
+        method: Union[
+            Literal[
+                "rescale_0_1",
+                "rescale_0_100",
+                "percent_of_sum",
+                "mean",
+                "z-score",
+                "softmax",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            method=method,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class Parent(Bucket[_R]):
+    """
+    A special single bucket aggregation that selects parent documents that
+    have the specified type, as defined in a `join` field.
+
+    :arg type: The child type that should be selected.
+    """
+
+    name = "parent"
+
+    def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any):
+        super().__init__(type=type, **kwargs)
+
+
+class PercentileRanks(Agg[_R]):
+    """
+    A multi-value metrics aggregation that calculates one or more
+    percentile ranks over numeric values extracted from the aggregated
+    documents.
+
+    :arg keyed: By default, the aggregation associates a unique string key
+        with each bucket and returns the ranges as a hash rather than an
+        array. Set to `false` to disable this behavior. Defaults to `True`
+        if omitted.
+    :arg values: An array of values for which to calculate the percentile
+        ranks.
+    :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm
+        to calculate percentile ranks.
+    :arg tdigest: Sets parameters for the default TDigest algorithm used
+        to calculate percentile ranks.
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "percentile_ranks"
+
+    def __init__(
+        self,
+        *,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        values: Union[Sequence[float], None, "DefaultType"] = DEFAULT,
+        hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT,
+        tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            keyed=keyed,
+            values=values,
+            hdr=hdr,
+            tdigest=tdigest,
+            format=format,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class Percentiles(Agg[_R]):
+    """
+    A multi-value metrics aggregation that calculates one or more
+    percentiles over numeric values extracted from the aggregated
+    documents.
+
+    :arg keyed: By default, the aggregation associates a unique string key
+        with each bucket and returns the ranges as a hash rather than an
+        array. Set to `false` to disable this behavior. Defaults to `True`
+        if omitted.
+    :arg percents: The percentiles to calculate.
+    :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm
+        to calculate percentiles.
+    :arg tdigest: Sets parameters for the default TDigest algorithm used
+        to calculate percentiles.
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "percentiles"
+
+    def __init__(
+        self,
+        *,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        percents: Union[Sequence[float], "DefaultType"] = DEFAULT,
+        hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT,
+        tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            keyed=keyed,
+            percents=percents,
+            hdr=hdr,
+            tdigest=tdigest,
+            format=format,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
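+
+# Illustrative usage (editorial sketch; the index and field are hypothetical):
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import Percentiles
+#
+#     s = Search(index="latency")
+#     s.aggs.metric(
+#         "load_time_pcts",
+#         Percentiles(field="load_time", percents=[50, 95, 99],
+#                     tdigest={"compression": 200}),
+#     )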
+
+
+class PercentilesBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which calculates percentiles across all
+    buckets of a specified metric in a sibling aggregation.
+
+    :arg percents: The list of percentiles to calculate.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "percentiles_bucket"
+
+    def __init__(
+        self,
+        *,
+        percents: Union[Sequence[float], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            percents=percents,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class Range(Bucket[_R]):
+    """
+    A multi-bucket value source based aggregation that enables the user to
+    define a set of ranges - each representing a bucket.
+
+    :arg field: The field whose values are used to build ranges.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg ranges: An array of ranges used to bucket documents.
+    :arg script:
+    :arg keyed: Set to `true` to associate a unique string key with each
+        bucket and return the ranges as a hash rather than an array.
+    :arg format:
+    """
+
+    name = "range"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[int, "DefaultType"] = DEFAULT,
+        ranges: Union[
+            Sequence["wrappers.AggregationRange"],
+            Sequence[Dict[str, Any]],
+            "DefaultType",
+        ] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            missing=missing,
+            ranges=ranges,
+            script=script,
+            keyed=keyed,
+            format=format,
+            **kwargs,
+        )
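+
+# Illustrative usage (editorial sketch; the index and field are hypothetical):
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import Range
+#
+#     s = Search(index="products")
+#     s.aggs.bucket(
+#         "price_ranges",
+#         Range(
+#             field="price",
+#             keyed=True,
+#             ranges=[{"to": 100}, {"from": 100, "to": 200}, {"from": 200}],
+#         ),
+#     )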
+
+
+class RareTerms(Bucket[_R]):
+    """
+    A multi-bucket value source based aggregation which finds "rare"
+    terms — terms that are at the long-tail of the distribution and are
+    not frequent.
+
+    :arg exclude: Terms that should be excluded from the aggregation.
+    :arg field: The field from which to return rare terms.
+    :arg include: Terms that should be included in the aggregation.
+    :arg max_doc_count: The maximum number of documents a term should
+        appear in. Defaults to `1` if omitted.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg precision: The precision of the internal CuckooFilters. Smaller
+        precision leads to better approximation, but higher memory usage.
+        Defaults to `0.001` if omitted.
+    :arg value_type:
+    """
+
+    name = "rare_terms"
+
+    def __init__(
+        self,
+        *,
+        exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        include: Union[
+            str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        max_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        precision: Union[float, "DefaultType"] = DEFAULT,
+        value_type: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            exclude=exclude,
+            field=field,
+            include=include,
+            max_doc_count=max_doc_count,
+            missing=missing,
+            precision=precision,
+            value_type=value_type,
+            **kwargs,
+        )
+
+
+class Rate(Agg[_R]):
+    """
+    Calculates a rate of documents or a field in each bucket. Can only be
+    used inside a `date_histogram` or `composite` aggregation.
+
+    :arg unit: The interval used to calculate the rate. By default, the
+        interval of the `date_histogram` is used.
+    :arg mode: How the rate is calculated. Defaults to `sum` if omitted.
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "rate"
+
+    def __init__(
+        self,
+        *,
+        unit: Union[
+            Literal[
+                "second", "minute", "hour", "day", "week", "month", "quarter", "year"
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        mode: Union[Literal["sum", "value_count"], "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            unit=unit,
+            mode=mode,
+            format=format,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class ReverseNested(Bucket[_R]):
+    """
+    A special single bucket aggregation that enables aggregating on parent
+    documents from nested documents. Should only be defined inside a
+    `nested` aggregation.
+
+    :arg path: Defines the nested object field that should be joined back
+        to. The default is empty, which means that it joins back to the
+        root/main document level.
+    """
+
+    name = "reverse_nested"
+
+    def __init__(
+        self,
+        path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(path=path, **kwargs)
+
+
+class RandomSampler(Bucket[_R]):
+    """
+    A single bucket aggregation that randomly includes documents in the
+    aggregated results. Sampling provides significant speed improvement at
+    the cost of accuracy.
+
+    :arg probability: (required) The probability that a document will be
+        included in the aggregated data. Must be greater than 0, less than
+        0.5, or exactly 1. The lower the probability, the fewer documents
+        are matched.
+    :arg seed: The seed to generate the random sampling of documents. When
+        a seed is provided, the random subset of documents is the same
+        between calls.
+    :arg shard_seed: When combined with seed, setting shard_seed ensures
+        100% consistent sampling over shards where data is exactly the
+        same.
+    """
+
+    name = "random_sampler"
+
+    def __init__(
+        self,
+        *,
+        probability: Union[float, "DefaultType"] = DEFAULT,
+        seed: Union[int, "DefaultType"] = DEFAULT,
+        shard_seed: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            probability=probability, seed=seed, shard_seed=shard_seed, **kwargs
+        )
+
+
+class Sampler(Bucket[_R]):
+    """
+    A filtering aggregation used to limit any sub aggregations' processing
+    to a sample of the top-scoring documents.
+
+    :arg shard_size: Limits how many top-scoring documents are collected
+        in the sample processed on each shard. Defaults to `100` if
+        omitted.
+    """
+
+    name = "sampler"
+
+    def __init__(self, shard_size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any):
+        super().__init__(shard_size=shard_size, **kwargs)
+
+
+class ScriptedMetric(Agg[_R]):
+    """
+    A metric aggregation that uses scripts to provide a metric output.
+
+    :arg combine_script: Runs once on each shard after document collection
+        is complete. Allows the aggregation to consolidate the state
+        returned from each shard.
+    :arg init_script: Runs prior to any collection of documents. Allows
+        the aggregation to set up any initial state.
+    :arg map_script: Runs once per document collected. If no
+        `combine_script` is specified, the resulting state needs to be
+        stored in the `state` object.
+    :arg params: A global object with script parameters for `init`, `map`
+        and `combine` scripts. It is shared between the scripts.
+    :arg reduce_script: Runs once on the coordinating node after all
+        shards have returned their results. The script is provided with
+        access to a variable `states`, which is an array of the result of
+        the `combine_script` on each shard.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "scripted_metric"
+
+    def __init__(
+        self,
+        *,
+        combine_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        init_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        map_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
+        reduce_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            combine_script=combine_script,
+            init_script=init_script,
+            map_script=map_script,
+            params=params,
+            reduce_script=reduce_script,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
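+
+# Illustrative usage (editorial sketch; the index, field and Painless scripts
+# are hypothetical). The scripts are passed as inline `{"source": ...}` dicts:
+#
+#     from elasticsearch.dsl import Search
+#     from elasticsearch.dsl.aggs import ScriptedMetric
+#
+#     s = Search(index="sales")
+#     s.aggs.metric(
+#         "total_amount",
+#         ScriptedMetric(
+#             init_script={"source": "state.total = 0"},
+#             map_script={"source": "state.total += doc['amount'].value"},
+#             combine_script={"source": "return state.total"},
+#             reduce_script={
+#                 "source": "double t = 0; for (def st : states) { t += st; } return t;"
+#             },
+#         ),
+#     )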
+
+
+class SerialDiff(Pipeline[_R]):
+    """
+    An aggregation that subtracts values in a time series from themselves
+    at different time lags or periods.
+
+    :arg lag: The historical bucket to subtract from the current value.
+        Must be a positive, non-zero integer.
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "serial_diff"
+
+    def __init__(
+        self,
+        *,
+        lag: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            lag=lag,
+            format=format,
+            gap_policy=gap_policy,
+            buckets_path=buckets_path,
+            **kwargs,
+        )
+
+
+class SignificantTerms(Bucket[_R]):
+    """
+    Returns interesting or unusual occurrences of terms in a set.
+
+    :arg background_filter: A background filter that can be used to focus
+        in on significant terms within a narrower context, instead of the
+        entire index.
+    :arg chi_square: Use Chi square, as described in "Information
+        Retrieval", Manning et al., Chapter 13.5.2, as the significance
+        score.
+    :arg exclude: Terms to exclude.
+    :arg execution_hint: Mechanism by which the aggregation should be
+        executed: using field values directly or using global ordinals.
+    :arg field: The field from which to return significant terms.
+    :arg gnd: Use Google normalized distance as described in "The Google
+        Similarity Distance", Cilibrasi and Vitanyi, 2007, as the
+        significance score.
+    :arg include: Terms to include.
+    :arg jlh: Use JLH score as the significance score.
+    :arg min_doc_count: Only return terms that are found in more than
+        `min_doc_count` hits. Defaults to `3` if omitted.
+    :arg mutual_information: Use mutual information as described in
+        "Information Retrieval", Manning et al., Chapter 13.5.1, as the
+        significance score.
+    :arg percentage: A simple calculation of the number of documents in
+        the foreground sample with a term divided by the number of
+        documents in the background with the term.
+    :arg script_heuristic: Customized score, implemented via a script.
+    :arg p_value: Significant terms heuristic that calculates the p-value
+        between the term existing in foreground and background sets. The
+        p-value is the probability of obtaining test results at least as
+        extreme as the results actually observed, under the assumption
+        that the null hypothesis is correct. The p-value is calculated
+        assuming that the foreground set and the background set are
+        independent Bernoulli trials
+        (https://en.wikipedia.org/wiki/Bernoulli_trial), with the null
+        hypothesis that the probabilities are the same.
+    :arg shard_min_doc_count: Regulates the certainty a shard has whether the
+        term should actually be added to the candidate list or not with
+        respect to the `min_doc_count`. Terms will only be considered if
+        their local shard frequency within the set is higher than the
+        `shard_min_doc_count`.
+    :arg shard_size: Can be used to control the volumes of candidate terms
+        produced by each shard. By default, `shard_size` will be
+        automatically estimated based on the number of shards and the
+        `size` parameter.
+    :arg size: The number of buckets returned out of the overall terms
+        list.
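+
+    Usage sketch, attaching the aggregation to a filtered search (the index
+    and field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="reports").query("match", force="transit police")
+        s.aggs.bucket("significant_crime_types", "significant_terms",
+                      field="crime_type")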
+    """
+
+    name = "significant_terms"
+    _param_defs = {
+        "background_filter": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        background_filter: Union[Query, "DefaultType"] = DEFAULT,
+        chi_square: Union[
+            "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        execution_hint: Union[
+            Literal[
+                "map",
+                "global_ordinals",
+                "global_ordinals_hash",
+                "global_ordinals_low_cardinality",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        gnd: Union[
+            "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        include: Union[
+            str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        mutual_information: Union[
+            "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        percentage: Union[
+            "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        script_heuristic: Union[
+            "types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        p_value: Union[
+            "types.PValueHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            background_filter=background_filter,
+            chi_square=chi_square,
+            exclude=exclude,
+            execution_hint=execution_hint,
+            field=field,
+            gnd=gnd,
+            include=include,
+            jlh=jlh,
+            min_doc_count=min_doc_count,
+            mutual_information=mutual_information,
+            percentage=percentage,
+            script_heuristic=script_heuristic,
+            p_value=p_value,
+            shard_min_doc_count=shard_min_doc_count,
+            shard_size=shard_size,
+            size=size,
+            **kwargs,
+        )
+
+
+class SignificantText(Bucket[_R]):
+    """
+    Returns interesting or unusual occurrences of free-text terms in a
+    set.
+
+    :arg background_filter: A background filter that can be used to focus
+        in on significant terms within a narrower context, instead of the
+        entire index.
+    :arg chi_square: Use Chi square, as described in "Information
+        Retrieval", Manning et al., Chapter 13.5.2, as the significance
+        score.
+    :arg exclude: Values to exclude.
+    :arg execution_hint: Determines whether the aggregation will use field
+        values directly or global ordinals.
+    :arg field: The field from which to return significant text.
+    :arg filter_duplicate_text: Whether to filter out duplicate text to
+        deal with noisy data.
+    :arg gnd: Use Google normalized distance as described in "The Google
+        Similarity Distance", Cilibrasi and Vitanyi, 2007, as the
+        significance score.
+    :arg include: Values to include.
+    :arg jlh: Use JLH score as the significance score.
+    :arg min_doc_count: Only return values that are found in more than
+        `min_doc_count` hits. Defaults to `3` if omitted.
+    :arg mutual_information: Use mutual information as described in
+        "Information Retrieval", Manning et al., Chapter 13.5.1, as the
+        significance score.
+    :arg percentage: A simple calculation of the number of documents in
+        the foreground sample with a term divided by the number of
+        documents in the background with the term.
+    :arg script_heuristic: Customized score, implemented via a script.
+    :arg shard_min_doc_count: Regulates how certain a shard must be, with
+        respect to `min_doc_count`, before a value is added to the candidate
+        list. Values will only be considered if their local shard frequency
+        within the set is higher than the `shard_min_doc_count`.
+    :arg shard_size: The number of candidate terms produced by each shard.
+        By default, `shard_size` will be automatically estimated based on
+        the number of shards and the `size` parameter.
+    :arg size: The number of buckets returned out of the overall terms
+        list.
+    :arg source_fields: Overrides the JSON `_source` fields from which
+        text will be analyzed.
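+
+    Usage sketch, attaching the aggregation to a full-text search (the index
+    and field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="news").query("match", content="elasticsearch")
+        s.aggs.bucket("keywords", "significant_text", field="content",
+                      filter_duplicate_text=True)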
+    """
+
+    name = "significant_text"
+    _param_defs = {
+        "background_filter": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        background_filter: Union[Query, "DefaultType"] = DEFAULT,
+        chi_square: Union[
+            "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        execution_hint: Union[
+            Literal[
+                "map",
+                "global_ordinals",
+                "global_ordinals_hash",
+                "global_ordinals_low_cardinality",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        filter_duplicate_text: Union[bool, "DefaultType"] = DEFAULT,
+        gnd: Union[
+            "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        include: Union[
+            str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        mutual_information: Union[
+            "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        percentage: Union[
+            "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        script_heuristic: Union[
+            "types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        source_fields: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            background_filter=background_filter,
+            chi_square=chi_square,
+            exclude=exclude,
+            execution_hint=execution_hint,
+            field=field,
+            filter_duplicate_text=filter_duplicate_text,
+            gnd=gnd,
+            include=include,
+            jlh=jlh,
+            min_doc_count=min_doc_count,
+            mutual_information=mutual_information,
+            percentage=percentage,
+            script_heuristic=script_heuristic,
+            shard_min_doc_count=shard_min_doc_count,
+            shard_size=shard_size,
+            size=size,
+            source_fields=source_fields,
+            **kwargs,
+        )
+
+
+class Stats(Agg[_R]):
+    """
+    A multi-value metrics aggregation that computes stats over numeric
+    values extracted from the aggregated documents.
+
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
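+
+    Usage sketch (the index and field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="exams")
+        s.aggs.metric("grade_stats", "stats", field="grade")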
+    """
+
+    name = "stats"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, field=field, missing=missing, script=script, **kwargs
+        )
+
+
+class StatsBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which calculates a variety of stats
+    across all buckets of a specified metric in a sibling aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
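+
+    Usage sketch, adding the aggregation as a sibling of a date histogram
+    (the index, field and aggregation names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="sales")
+        s.aggs.bucket("sales_per_month", "date_histogram", field="date",
+                      calendar_interval="month") \
+              .metric("sales", "sum", field="price")
+        s.aggs.pipeline("stats_monthly_sales", "stats_bucket",
+                        buckets_path="sales_per_month>sales")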
+    """
+
+    name = "stats_bucket"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class StringStats(Agg[_R]):
+    """
+    A multi-value metrics aggregation that computes statistics over string
+    values extracted from the aggregated documents.
+
+    :arg show_distribution: Shows the probability distribution for all
+        characters.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "string_stats"
+
+    def __init__(
+        self,
+        *,
+        show_distribution: Union[bool, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            show_distribution=show_distribution,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class Sum(Agg[_R]):
+    """
+    A single-value metrics aggregation that sums numeric values that are
+    extracted from the aggregated documents.
+
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "sum"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, field=field, missing=missing, script=script, **kwargs
+        )
+
+
+class SumBucket(Pipeline[_R]):
+    """
+    A sibling pipeline aggregation which calculates the sum of a specified
+    metric across all buckets in a sibling aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "sum_bucket"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class Terms(Bucket[_R]):
+    """
+    A multi-bucket value source based aggregation where buckets are
+    dynamically built - one per unique value.
+
+    :arg collect_mode: Determines how child aggregations should be
+        calculated: breadth-first or depth-first.
+    :arg exclude: Values to exclude. Accepts regular expressions and
+        partitions.
+    :arg execution_hint: Determines whether the aggregation will use field
+        values directly or global ordinals.
+    :arg field: The field from which to return terms.
+    :arg include: Values to include. Accepts regular expressions and
+        partitions.
+    :arg min_doc_count: Only return values that are found in more than
+        `min_doc_count` hits. Defaults to `1` if omitted.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg missing_order:
+    :arg missing_bucket:
+    :arg value_type: Coerce unmapped fields into the specified type.
+    :arg order: Specifies the sort order of the buckets. Defaults to
+        sorting by descending document count.
+    :arg script:
+    :arg shard_min_doc_count: Regulates how certain a shard must be, with
+        respect to `min_doc_count`, before a term is added to the candidate
+        list. Terms will only be considered if their local shard frequency
+        within the set is higher than the `shard_min_doc_count`.
+    :arg shard_size: The number of candidate terms produced by each shard.
+        By default, `shard_size` will be automatically estimated based on
+        the number of shards and the `size` parameter.
+    :arg show_term_doc_count_error: Set to `true` to return the
+        `doc_count_error_upper_bound`, which is an upper bound to the
+        error on the `doc_count` returned by each shard.
+    :arg size: The number of buckets returned out of the overall terms
+        list. Defaults to `10` if omitted.
+    :arg format:
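+
+    Usage sketch, attaching the aggregation to a search and reading the
+    buckets back (the index and field names below are hypothetical, and a
+    default connection is assumed to be configured)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="books")
+        s.aggs.bucket("genres", "terms", field="genre", size=10)
+        response = s.execute()
+        for bucket in response.aggregations.genres.buckets:
+            print(bucket.key, bucket.doc_count)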
+    """
+
+    name = "terms"
+
+    def __init__(
+        self,
+        *,
+        collect_mode: Union[
+            Literal["depth_first", "breadth_first"], "DefaultType"
+        ] = DEFAULT,
+        exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        execution_hint: Union[
+            Literal[
+                "map",
+                "global_ordinals",
+                "global_ordinals_hash",
+                "global_ordinals_low_cardinality",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        include: Union[
+            str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        missing_order: Union[
+            Literal["first", "last", "default"], "DefaultType"
+        ] = DEFAULT,
+        missing_bucket: Union[bool, "DefaultType"] = DEFAULT,
+        value_type: Union[str, "DefaultType"] = DEFAULT,
+        order: Union[
+            Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
+            Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
+            "DefaultType",
+        ] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            collect_mode=collect_mode,
+            exclude=exclude,
+            execution_hint=execution_hint,
+            field=field,
+            include=include,
+            min_doc_count=min_doc_count,
+            missing=missing,
+            missing_order=missing_order,
+            missing_bucket=missing_bucket,
+            value_type=value_type,
+            order=order,
+            script=script,
+            shard_min_doc_count=shard_min_doc_count,
+            shard_size=shard_size,
+            show_term_doc_count_error=show_term_doc_count_error,
+            size=size,
+            format=format,
+            **kwargs,
+        )
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
+
+
+class TimeSeries(Bucket[_R]):
+    """
+    The time series aggregation queries data created using a time series
+    index. This is typically data such as metrics or other data streams
+    with a time component, and requires creating an index using the time
+    series mode.
+
+    :arg size: The maximum number of results to return. Defaults to
+        `10000` if omitted.
+    :arg keyed: Set to `true` to associate a unique string key with each
+        bucket and return the ranges as a hash rather than an array.
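+
+    Usage sketch (assumes an index created in time series mode; the index
+    name below is hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="metrics-cpu")
+        s.aggs.bucket("ts", "time_series", size=1000)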
+    """
+
+    name = "time_series"
+
+    def __init__(
+        self,
+        *,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        keyed: Union[bool, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(size=size, keyed=keyed, **kwargs)
+
+
+class TopHits(Agg[_R]):
+    """
+    A metric aggregation that returns the top matching documents per
+    bucket.
+
+    :arg docvalue_fields: Fields for which to return doc values.
+    :arg explain: If `true`, returns detailed information about score
+        computation as part of a hit.
+    :arg fields: Array of wildcard (*) patterns. The request returns
+        values for field names matching these patterns in the hits.fields
+        property of the response.
+    :arg from: Starting document offset.
+    :arg highlight: Specifies the highlighter to use for retrieving
+        highlighted snippets from one or more fields in the search
+        results.
+    :arg script_fields: Returns the result of one or more script
+        evaluations for each hit.
+    :arg size: The maximum number of top matching hits to return per
+        bucket. Defaults to `3` if omitted.
+    :arg sort: Sort order of the top matching hits. By default, the hits
+        are sorted by the score of the main query.
+    :arg _source: Selects the fields of the source that are returned.
+    :arg stored_fields: Returns values for the specified stored fields
+        (fields that use the `store` mapping option).
+    :arg track_scores: If `true`, calculates and returns document scores,
+        even if the scores are not used for sorting.
+    :arg version: If `true`, returns document version as part of a hit.
+    :arg seq_no_primary_term: If `true`, returns sequence number and
+        primary term of the last modification of each hit.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
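+
+    Usage sketch, returning the latest hit per terms bucket (the index and
+    field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="sales")
+        types = s.aggs.bucket("per_type", "terms", field="type")
+        types.metric("latest_sale", "top_hits", size=1,
+                     sort=[{"date": {"order": "desc"}}])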
+    """
+
+    name = "top_hits"
+
+    def __init__(
+        self,
+        *,
+        docvalue_fields: Union[
+            Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        explain: Union[bool, "DefaultType"] = DEFAULT,
+        fields: Union[
+            Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        from_: Union[int, "DefaultType"] = DEFAULT,
+        highlight: Union["types.Highlight", Dict[str, Any], "DefaultType"] = DEFAULT,
+        script_fields: Union[
+            Mapping[str, "types.ScriptField"], Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        sort: Union[
+            Union[Union[str, "InstrumentedField"], "types.SortOptions"],
+            Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]],
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        _source: Union[
+            bool, "types.SourceFilter", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        stored_fields: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        track_scores: Union[bool, "DefaultType"] = DEFAULT,
+        version: Union[bool, "DefaultType"] = DEFAULT,
+        seq_no_primary_term: Union[bool, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            docvalue_fields=docvalue_fields,
+            explain=explain,
+            fields=fields,
+            from_=from_,
+            highlight=highlight,
+            script_fields=script_fields,
+            size=size,
+            sort=sort,
+            _source=_source,
+            stored_fields=stored_fields,
+            track_scores=track_scores,
+            version=version,
+            seq_no_primary_term=seq_no_primary_term,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return TopHitsData(self, search, data)
+
+
+class TTest(Agg[_R]):
+    """
+    A metrics aggregation that performs a statistical hypothesis test in
+    which the test statistic follows a Student’s t-distribution under the
+    null hypothesis on numeric values extracted from the aggregated
+    documents.
+
+    :arg a: Test population A.
+    :arg b: Test population B.
+    :arg type: The type of test. Defaults to `heteroscedastic` if omitted.
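+
+    Usage sketch, comparing two fields with a paired test (the index and
+    field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="node-upgrade")
+        s.aggs.metric("startup_time_ttest", "t_test",
+                      a={"field": "startup_time_before"},
+                      b={"field": "startup_time_after"},
+                      type="paired")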
+    """
+
+    name = "t_test"
+
+    def __init__(
+        self,
+        *,
+        a: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT,
+        b: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT,
+        type: Union[
+            Literal["paired", "homoscedastic", "heteroscedastic"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(a=a, b=b, type=type, **kwargs)
+
+
+class TopMetrics(Agg[_R]):
+    """
+    A metric aggregation that selects metrics from the document with the
+    largest or smallest sort value.
+
+    :arg metrics: The fields of the top document to return.
+    :arg size: The number of top documents from which to return metrics.
+        Defaults to `1` if omitted.
+    :arg sort: The sort order of the documents.
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
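+
+    Usage sketch, returning the metric from the most recent document (the
+    index and field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="sensor-readings")
+        s.aggs.metric("latest_temperature", "top_metrics",
+                      metrics={"field": "temperature"},
+                      sort={"timestamp": "desc"})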
+    """
+
+    name = "top_metrics"
+
+    def __init__(
+        self,
+        *,
+        metrics: Union[
+            "types.TopMetricsValue",
+            Sequence["types.TopMetricsValue"],
+            Sequence[Dict[str, Any]],
+            "DefaultType",
+        ] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        sort: Union[
+            Union[Union[str, "InstrumentedField"], "types.SortOptions"],
+            Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]],
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            metrics=metrics,
+            size=size,
+            sort=sort,
+            field=field,
+            missing=missing,
+            script=script,
+            **kwargs,
+        )
+
+
+class ValueCount(Agg[_R]):
+    """
+    A single-value metrics aggregation that counts the number of values
+    that are extracted from the aggregated documents.
+
+    :arg format:
+    :arg field: The field on which to run the aggregation.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    :arg script:
+    """
+
+    name = "value_count"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, field=field, missing=missing, script=script, **kwargs
+        )
+
+
+class WeightedAvg(Agg[_R]):
+    """
+    A single-value metrics aggregation that computes the weighted average
+    of numeric values that are extracted from the aggregated documents.
+
+    :arg format: A numeric response formatter.
+    :arg value: Configuration for the field that provides the values.
+    :arg value_type:
+    :arg weight: Configuration for the field or script that provides the
+        weights.
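+
+    Usage sketch (the index and field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="exams")
+        s.aggs.metric("weighted_grade", "weighted_avg",
+                      value={"field": "grade"}, weight={"field": "weight"})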
+    """
+
+    name = "weighted_avg"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        value: Union[
+            "types.WeightedAverageValue", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        value_type: Union[
+            Literal[
+                "string",
+                "long",
+                "double",
+                "number",
+                "date",
+                "date_nanos",
+                "ip",
+                "numeric",
+                "geo_point",
+                "boolean",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        weight: Union[
+            "types.WeightedAverageValue", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, value=value, value_type=value_type, weight=weight, **kwargs
+        )
+
+
+class VariableWidthHistogram(Bucket[_R]):
+    """
+    A multi-bucket aggregation similar to the histogram, except instead of
+    providing an interval to use as the width of each bucket, a target
+    number of buckets is provided.
+
+    :arg field: The name of the field.
+    :arg buckets: The target number of buckets. Defaults to `10` if
+        omitted.
+    :arg shard_size: The number of buckets that the coordinating node will
+        request from each shard. Defaults to `buckets * 50`.
+    :arg initial_buffer: Specifies the number of individual documents that
+        will be stored in memory on a shard before the initial bucketing
+        algorithm is run. Defaults to `min(10 * shard_size, 50000)`.
+    :arg script:
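+
+    Usage sketch (the index and field names below are hypothetical)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="products")
+        s.aggs.bucket("price_clusters", "variable_width_histogram",
+                      field="price", buckets=5)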
+    """
+
+    name = "variable_width_histogram"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        buckets: Union[int, "DefaultType"] = DEFAULT,
+        shard_size: Union[int, "DefaultType"] = DEFAULT,
+        initial_buffer: Union[int, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            buckets=buckets,
+            shard_size=shard_size,
+            initial_buffer=initial_buffer,
+            script=script,
+            **kwargs,
+        )
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
diff -pruN 8.17.2-2/elasticsearch/dsl/analysis.py 9.2.0-1/elasticsearch/dsl/analysis.py
--- 8.17.2-2/elasticsearch/dsl/analysis.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/analysis.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,341 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, ClassVar, Dict, List, Optional, Union, cast
+
+from . import async_connections, connections
+from .utils import AsyncUsingType, AttrDict, DslBase, UsingType, merge
+
+__all__ = ["tokenizer", "analyzer", "char_filter", "token_filter", "normalizer"]
+
+
+class AnalysisBase:
+    @classmethod
+    def _type_shortcut(
+        cls,
+        name_or_instance: Union[str, "AnalysisBase"],
+        type: Optional[str] = None,
+        **kwargs: Any,
+    ) -> DslBase:
+        if isinstance(name_or_instance, cls):
+            if type or kwargs:
+                raise ValueError(f"{cls.__name__}() cannot accept parameters.")
+            return name_or_instance  # type: ignore[return-value]
+
+        if not (type or kwargs):
+            return cls.get_dsl_class("builtin")(name_or_instance)  # type: ignore[no-any-return, attr-defined]
+
+        return cls.get_dsl_class(type, "custom")(  # type: ignore[no-any-return, attr-defined]
+            name_or_instance, type or "custom", **kwargs
+        )
+
+
+class CustomAnalysis:
+    name = "custom"
+
+    def __init__(self, filter_name: str, builtin_type: str = "custom", **kwargs: Any):
+        self._builtin_type = builtin_type
+        self._name = filter_name
+        super().__init__(**kwargs)
+
+    def to_dict(self) -> Dict[str, Any]:
+        # only the name is rendered when this object appears in a list
+        return self._name  # type: ignore[return-value]
+
+    def get_definition(self) -> Dict[str, Any]:
+        d = super().to_dict()  # type: ignore[misc]
+        d = d.pop(self.name)
+        d["type"] = self._builtin_type
+        return d  # type: ignore[no-any-return]
+
+
+class CustomAnalysisDefinition(CustomAnalysis):
+    _type_name: str
+    _param_defs: ClassVar[Dict[str, Any]]
+    filter: List[Any]
+    char_filter: List[Any]
+
+    def get_analysis_definition(self) -> Dict[str, Any]:
+        out = {self._type_name: {self._name: self.get_definition()}}
+
+        t = cast("Tokenizer", getattr(self, "tokenizer", None))
+        if "tokenizer" in self._param_defs and hasattr(t, "get_definition"):
+            out["tokenizer"] = {t._name: t.get_definition()}
+
+        filters = {
+            f._name: f.get_definition()
+            for f in self.filter
+            if hasattr(f, "get_definition")
+        }
+        if filters:
+            out["filter"] = filters
+
+        # any sub filter definitions like multiplexers etc?
+        for f in self.filter:
+            if hasattr(f, "get_analysis_definition"):
+                d = f.get_analysis_definition()
+                if d:
+                    merge(out, d, True)
+
+        char_filters = {
+            f._name: f.get_definition()
+            for f in self.char_filter
+            if hasattr(f, "get_definition")
+        }
+        if char_filters:
+            out["char_filter"] = char_filters
+
+        return out
+
+
+class BuiltinAnalysis:
+    name = "builtin"
+
+    def __init__(self, name: str):
+        self._name = name
+        super().__init__()
+
+    def to_dict(self) -> Dict[str, Any]:
+        # only the name is rendered when this object appears in a list
+        return self._name  # type: ignore[return-value]
+
+
+class Analyzer(AnalysisBase, DslBase):
+    _type_name = "analyzer"
+    name = ""
+
+
+class BuiltinAnalyzer(BuiltinAnalysis, Analyzer):
+    def get_analysis_definition(self) -> Dict[str, Any]:
+        return {}
+
+
+class CustomAnalyzer(CustomAnalysisDefinition, Analyzer):
+    _param_defs = {
+        "filter": {"type": "token_filter", "multi": True},
+        "char_filter": {"type": "char_filter", "multi": True},
+        "tokenizer": {"type": "tokenizer"},
+    }
+
+    def _get_body(
+        self, text: str, explain: bool, attributes: Optional[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        body = {"text": text, "explain": explain}
+        if attributes:
+            body["attributes"] = attributes
+
+        definition = self.get_analysis_definition()
+        analyzer_def = self.get_definition()
+
+        for section in ("tokenizer", "char_filter", "filter"):
+            if section not in analyzer_def:
+                continue
+            sec_def = definition.get(section, {})
+            sec_names = analyzer_def[section]
+
+            if isinstance(sec_names, str):
+                body[section] = sec_def.get(sec_names, sec_names)
+            else:
+                body[section] = [
+                    sec_def.get(sec_name, sec_name) for sec_name in sec_names
+                ]
+
+        if self._builtin_type != "custom":
+            body["analyzer"] = self._builtin_type
+
+        return body
+
+    def simulate(
+        self,
+        text: str,
+        using: UsingType = "default",
+        explain: bool = False,
+        attributes: Optional[Dict[str, Any]] = None,
+    ) -> AttrDict[Any]:
+        """
+        Use the Analyze API of elasticsearch to test the outcome of this analyzer.
+
+        :arg text: Text to be analyzed
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg explain: if ``True``, output all token attributes for each token.
+            You can filter the token attributes you want to output by setting
+            the ``attributes`` option.
+        :arg attributes: if ``explain`` is specified, filter the token
+            attributes to return.
+        """
+        es = connections.get_connection(using)
+        return AttrDict(
+            cast(
+                Dict[str, Any],
+                es.indices.analyze(body=self._get_body(text, explain, attributes)),
+            )
+        )
+
+    async def async_simulate(
+        self,
+        text: str,
+        using: AsyncUsingType = "default",
+        explain: bool = False,
+        attributes: Optional[Dict[str, Any]] = None,
+    ) -> AttrDict[Any]:
+        """
+        Use the Analyze API of elasticsearch to test the outcome of this analyzer.
+
+        :arg text: Text to be analyzed
+        :arg using: connection alias to use, defaults to ``'default'``
+        :arg explain: if ``True``, output all token attributes for each token.
+            You can filter the token attributes you want to output by setting
+            the ``attributes`` option.
+        :arg attributes: if ``explain`` is specified, filter the token
+            attributes to return.
+        """
+        es = async_connections.get_connection(using)
+        return AttrDict(
+            cast(
+                Dict[str, Any],
+                await es.indices.analyze(
+                    body=self._get_body(text, explain, attributes)
+                ),
+            )
+        )
+
+
+class Normalizer(AnalysisBase, DslBase):
+    _type_name = "normalizer"
+    name = ""
+
+
+class BuiltinNormalizer(BuiltinAnalysis, Normalizer):
+    def get_analysis_definition(self) -> Dict[str, Any]:
+        return {}
+
+
+class CustomNormalizer(CustomAnalysisDefinition, Normalizer):
+    _param_defs = {
+        "filter": {"type": "token_filter", "multi": True},
+        "char_filter": {"type": "char_filter", "multi": True},
+    }
+
+
+class Tokenizer(AnalysisBase, DslBase):
+    _type_name = "tokenizer"
+    name = ""
+
+
+class BuiltinTokenizer(BuiltinAnalysis, Tokenizer):
+    pass
+
+
+class CustomTokenizer(CustomAnalysis, Tokenizer):
+    pass
+
+
+class TokenFilter(AnalysisBase, DslBase):
+    _type_name = "token_filter"
+    name = ""
+
+
+class BuiltinTokenFilter(BuiltinAnalysis, TokenFilter):
+    pass
+
+
+class CustomTokenFilter(CustomAnalysis, TokenFilter):
+    pass
+
+
+class MultiplexerTokenFilter(CustomTokenFilter):
+    name = "multiplexer"
+
+    def get_definition(self) -> Dict[str, Any]:
+        d = super(CustomTokenFilter, self).get_definition()
+
+        if "filters" in d:
+            d["filters"] = [
+                # comma delimited string given by user
+                (
+                    fs
+                    if isinstance(fs, str)
+                    else
+                    # list of strings or TokenFilter objects
+                    ", ".join(f.to_dict() if hasattr(f, "to_dict") else f for f in fs)
+                )
+                for fs in self.filters
+            ]
+        return d
+
+    def get_analysis_definition(self) -> Dict[str, Any]:
+        if not hasattr(self, "filters"):
+            return {}
+
+        fs: Dict[str, Any] = {}
+        d = {"filter": fs}
+        for filters in self.filters:
+            if isinstance(filters, str):
+                continue
+            fs.update(
+                {
+                    f._name: f.get_definition()
+                    for f in filters
+                    if hasattr(f, "get_definition")
+                }
+            )
+        return d
+
+
+class ConditionalTokenFilter(CustomTokenFilter):
+    name = "condition"
+
+    def get_definition(self) -> Dict[str, Any]:
+        d = super(CustomTokenFilter, self).get_definition()
+        if "filter" in d:
+            d["filter"] = [
+                f.to_dict() if hasattr(f, "to_dict") else f for f in self.filter
+            ]
+        return d
+
+    def get_analysis_definition(self) -> Dict[str, Any]:
+        if not hasattr(self, "filter"):
+            return {}
+
+        return {
+            "filter": {
+                f._name: f.get_definition()
+                for f in self.filter
+                if hasattr(f, "get_definition")
+            }
+        }
+
+
+class CharFilter(AnalysisBase, DslBase):
+    _type_name = "char_filter"
+    name = ""
+
+
+class BuiltinCharFilter(BuiltinAnalysis, CharFilter):
+    pass
+
+
+class CustomCharFilter(CustomAnalysis, CharFilter):
+    pass
+
+
+# shortcuts for direct use
+analyzer = Analyzer._type_shortcut
+tokenizer = Tokenizer._type_shortcut
+token_filter = TokenFilter._type_shortcut
+char_filter = CharFilter._type_shortcut
+normalizer = Normalizer._type_shortcut
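+
+# A minimal usage sketch of the shortcuts above (the analyzer, tokenizer and
+# filter names are hypothetical); the resulting analyzer can be attached to a
+# field definition or tested directly with ``simulate()``:
+#
+#     my_analyzer = analyzer(
+#         "my_analyzer",
+#         tokenizer=tokenizer("trigram", "ngram", min_gram=3, max_gram=3),
+#         filter=["lowercase",
+#                 token_filter("my_stop", "stop", stopwords=["a", "the"])],
+#     )
+#     my_analyzer.simulate("Some text to analyze")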
diff -pruN 8.17.2-2/elasticsearch/dsl/async_connections.py 9.2.0-1/elasticsearch/dsl/async_connections.py
--- 8.17.2-2/elasticsearch/dsl/async_connections.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/async_connections.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,37 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Type
+
+from elasticsearch import AsyncElasticsearch
+
+from .connections import Connections
+
+
+class AsyncElasticsearchConnections(Connections[AsyncElasticsearch]):
+    def __init__(
+        self, *, elasticsearch_class: Type[AsyncElasticsearch] = AsyncElasticsearch
+    ):
+        super().__init__(elasticsearch_class=elasticsearch_class)
+
+
+connections = AsyncElasticsearchConnections(elasticsearch_class=AsyncElasticsearch)
+configure = connections.configure
+add_connection = connections.add_connection
+remove_connection = connections.remove_connection
+create_connection = connections.create_connection
+get_connection = connections.get_connection
diff -pruN 8.17.2-2/elasticsearch/dsl/connections.py 9.2.0-1/elasticsearch/dsl/connections.py
--- 8.17.2-2/elasticsearch/dsl/connections.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/connections.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,142 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, Dict, Generic, Type, TypeVar, Union
+
+from elasticsearch import Elasticsearch, __versionstr__
+
+from .serializer import serializer
+
+_T = TypeVar("_T")
+
+
+class Connections(Generic[_T]):
+    """
+    Class responsible for holding connections to different clusters. Used as a
+    singleton in this module.
+    """
+
+    def __init__(self, *, elasticsearch_class: Type[_T]):
+        self._kwargs: Dict[str, Any] = {}
+        self._conns: Dict[str, _T] = {}
+        self.elasticsearch_class: Type[_T] = elasticsearch_class
+
+    def configure(self, **kwargs: Any) -> None:
+        """
+        Configure multiple connections at once, useful for passing in config
+        dictionaries obtained from other sources, like Django's settings or a
+        configuration management tool.
+
+        Example::
+
+            connections.configure(
+                default={'hosts': 'localhost'},
+                dev={'hosts': ['esdev1.example.com:9200'], 'sniff_on_start': True},
+            )
+
+        Connections will only be constructed lazily when requested through
+        ``get_connection``.
+        """
+        for k in list(self._conns):
+            # try and preserve existing client to keep the persistent connections alive
+            if k in self._kwargs and kwargs.get(k, None) == self._kwargs[k]:
+                continue
+            del self._conns[k]
+        self._kwargs = kwargs
+
+    def add_connection(self, alias: str, conn: _T) -> None:
+        """
+        Add a connection object; it will be passed through as-is.
+        """
+        self._conns[alias] = self._with_user_agent(conn)
+
+    def remove_connection(self, alias: str) -> None:
+        """
+        Remove connection from the registry. Raises ``KeyError`` if connection
+        wasn't found.
+        """
+        errors = 0
+        for d in (self._conns, self._kwargs):
+            try:
+                del d[alias]
+            except KeyError:
+                errors += 1
+
+        if errors == 2:
+            raise KeyError(f"There is no connection with alias {alias!r}.")
+
+    def create_connection(self, alias: str = "default", **kwargs: Any) -> _T:
+        """
+        Construct an instance of ``elasticsearch.Elasticsearch`` and register
+        it under the given alias.
+        """
+        kwargs.setdefault("serializer", serializer)
+        conn = self._conns[alias] = self.elasticsearch_class(**kwargs)
+        return self._with_user_agent(conn)
+
+    def get_connection(self, alias: Union[str, _T] = "default") -> _T:
+        """
+        Retrieve a connection, constructing it if necessary (when only its
+        configuration was passed to us). If a non-string alias has been passed
+        through, we assume it is already a client instance and return it as-is.
+
+        Raises ``KeyError`` if no client (or its definition) is registered
+        under the alias.
+        """
+        # do not check isinstance(Elasticsearch) so that people can wrap their
+        # clients
+        if not isinstance(alias, str):
+            return self._with_user_agent(alias)
+
+        # connection already established
+        try:
+            return self._conns[alias]
+        except KeyError:
+            pass
+
+        # if not, try to create it
+        try:
+            return self.create_connection(alias, **self._kwargs[alias])
+        except KeyError:
+            # no connection and no kwargs to set one up
+            raise KeyError(f"There is no connection with alias {alias!r}.")
+
+    def _with_user_agent(self, conn: _T) -> _T:
+        # try to inject our user agent
+        if hasattr(conn, "_headers"):
+            is_frozen = conn._headers.frozen
+            if is_frozen:
+                conn._headers = conn._headers.copy()
+            conn._headers.update(
+                {"user-agent": f"elasticsearch-dsl-py/{__versionstr__}"}
+            )
+            if is_frozen:
+                conn._headers.freeze()
+        return conn
+
+
+class ElasticsearchConnections(Connections[Elasticsearch]):
+    def __init__(self, *, elasticsearch_class: Type[Elasticsearch] = Elasticsearch):
+        super().__init__(elasticsearch_class=elasticsearch_class)
+
+
+connections = ElasticsearchConnections()
+configure = connections.configure
+add_connection = connections.add_connection
+remove_connection = connections.remove_connection
+create_connection = connections.create_connection
+get_connection = connections.get_connection
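+
+# A minimal usage sketch of the module-level shortcuts above (the host URL is
+# hypothetical):
+#
+#     from elasticsearch.dsl import connections
+#
+#     connections.create_connection(hosts="https://localhost:9200")
+#     es = connections.get_connection()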
diff -pruN 8.17.2-2/elasticsearch/dsl/document.py 9.2.0-1/elasticsearch/dsl/document.py
--- 8.17.2-2/elasticsearch/dsl/document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,20 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ._async.document import AsyncDocument  # noqa: F401
+from ._sync.document import Document  # noqa: F401
+from .document_base import InnerDoc, MetaField  # noqa: F401
diff -pruN 8.17.2-2/elasticsearch/dsl/document_base.py 9.2.0-1/elasticsearch/dsl/document_base.py
--- 8.17.2-2/elasticsearch/dsl/document_base.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/document_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,686 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import json
+from datetime import date, datetime
+from fnmatch import fnmatch
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Tuple,
+    TypeVar,
+    Union,
+    get_args,
+    overload,
+)
+
+from typing_extensions import _AnnotatedAlias
+
+try:
+    import annotationlib
+except ImportError:
+    annotationlib = None
+
+try:
+    from types import UnionType
+except ImportError:
+    UnionType = None  # type: ignore[assignment, misc]
+
+from typing_extensions import dataclass_transform
+
+from .exceptions import ValidationException
+from .field import Binary, Boolean, Date, Field, Float, Integer, Nested, Object, Text
+from .mapping import Mapping
+from .utils import DOC_META_FIELDS, ObjectBase
+
+if TYPE_CHECKING:
+    from elastic_transport import ObjectApiResponse
+
+    from ..esql.esql import ESQLBase
+    from .index_base import IndexBase
+
+
+class MetaField:
+    def __init__(self, *args: Any, **kwargs: Any):
+        self.args, self.kwargs = args, kwargs
+
+
+class InstrumentedExpression:
+    """Proxy object for a ES|QL expression."""
+
+    def __init__(self, expr: str):
+        self._expr = expr
+
+    def _render_value(self, value: Any) -> str:
+        if isinstance(value, InstrumentedExpression):
+            return str(value)
+        return json.dumps(value)
+
+    def __str__(self) -> str:
+        return self._expr
+
+    def __repr__(self) -> str:
+        return f"InstrumentedExpression[{self._expr}]"
+
+    def __pos__(self) -> "InstrumentedExpression":
+        return self
+
+    def __neg__(self) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"-({self._expr})")
+
+    def __eq__(self, value: Any) -> "InstrumentedExpression":  # type: ignore[override]
+        return InstrumentedExpression(f"{self._expr} == {self._render_value(value)}")
+
+    def __ne__(self, value: Any) -> "InstrumentedExpression":  # type: ignore[override]
+        return InstrumentedExpression(f"{self._expr} != {self._render_value(value)}")
+
+    def __lt__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} < {self._render_value(value)}")
+
+    def __gt__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} > {self._render_value(value)}")
+
+    def __le__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} <= {self._render_value(value)}")
+
+    def __ge__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} >= {self._render_value(value)}")
+
+    def __add__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} + {self._render_value(value)}")
+
+    def __radd__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._render_value(value)} + {self._expr}")
+
+    def __sub__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} - {self._render_value(value)}")
+
+    def __rsub__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._render_value(value)} - {self._expr}")
+
+    def __mul__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} * {self._render_value(value)}")
+
+    def __rmul__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._render_value(value)} * {self._expr}")
+
+    def __truediv__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} / {self._render_value(value)}")
+
+    def __rtruediv__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._render_value(value)} / {self._expr}")
+
+    def __mod__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._expr} % {self._render_value(value)}")
+
+    def __rmod__(self, value: Any) -> "InstrumentedExpression":
+        return InstrumentedExpression(f"{self._render_value(value)} % {self._expr}")
+
+    def is_null(self) -> "InstrumentedExpression":
+        """Compare the expression against NULL."""
+        return InstrumentedExpression(f"{self._expr} IS NULL")
+
+    def is_not_null(self) -> "InstrumentedExpression":
+        """Compare the expression against NOT NULL."""
+        return InstrumentedExpression(f"{self._expr} IS NOT NULL")
+
+    def in_(self, *values: Any) -> "InstrumentedExpression":
+        """Test if the expression equals one of the given values."""
+        rendered_values = ", ".join([f"{value}" for value in values])
+        return InstrumentedExpression(f"{self._expr} IN ({rendered_values})")
+
+    def like(self, *patterns: str) -> "InstrumentedExpression":
+        """Filter the expression using a string pattern."""
+        if len(patterns) == 1:
+            return InstrumentedExpression(
+                f"{self._expr} LIKE {self._render_value(patterns[0])}"
+            )
+        else:
+            return InstrumentedExpression(
+                f'{self._expr} LIKE ({", ".join([self._render_value(p) for p in patterns])})'
+            )
+
+    def rlike(self, *patterns: str) -> "InstrumentedExpression":
+        """Filter the expression using a regular expression."""
+        if len(patterns) == 1:
+            return InstrumentedExpression(
+                f"{self._expr} RLIKE {self._render_value(patterns[0])}"
+            )
+        else:
+            return InstrumentedExpression(
+                f'{self._expr} RLIKE ({", ".join([self._render_value(p) for p in patterns])})'
+            )
+
+    def match(self, query: str) -> "InstrumentedExpression":
+        """Perform a match query on the field."""
+        return InstrumentedExpression(f"{self._expr}:{self._render_value(query)}")
+
+    def asc(self) -> "InstrumentedExpression":
+        """Return the field name representation for ascending sort order.
+
+        For use in ES|QL queries only.
+        """
+        return InstrumentedExpression(f"{self._expr} ASC")
+
+    def desc(self) -> "InstrumentedExpression":
+        """Return the field name representation for descending sort order.
+
+        For use in ES|QL queries only.
+        """
+        return InstrumentedExpression(f"{self._expr} DESC")
+
+    def nulls_first(self) -> "InstrumentedExpression":
+        """Return the field name representation for nulls first sort order.
+
+        For use in ES|QL queries only.
+        """
+        return InstrumentedExpression(f"{self._expr} NULLS FIRST")
+
+    def nulls_last(self) -> "InstrumentedExpression":
+        """Return the field name representation for nulls last sort order.
+
+        For use in ES|QL queries only.
+        """
+        return InstrumentedExpression(f"{self._expr} NULLS LAST")
+
+    def where(
+        self, *expressions: Union[str, "InstrumentedExpression"]
+    ) -> "InstrumentedExpression":
+        """Add a condition to be met for the row to be included.
+
+        Use only in expressions given in the ``STATS`` command.
+        """
+        if len(expressions) == 1:
+            return InstrumentedExpression(f"{self._expr} WHERE {expressions[0]}")
+        else:
+            return InstrumentedExpression(
+                f'{self._expr} WHERE {" AND ".join([f"({expr})" for expr in expressions])}'
+            )
+
+
+E = InstrumentedExpression
+
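For illustration, a minimal sketch of how these expression helpers compose ES|QL fragments through plain string rendering (exact quoting of string arguments is delegated to ``_render_value``; numeric operands are assumed to render unquoted):

    from elasticsearch.dsl.document_base import E, InstrumentedExpression

    price = InstrumentedExpression("price")

    price % 10                                  # renders as: price % 10
    price.is_null()                             # renders as: price IS NULL
    price.in_(10, 20, 30)                       # renders as: price IN (10, 20, 30)
    price.like("a*", "b*")                      # renders as: price LIKE (...), each pattern quoted by _render_value
    E("count(*)").where(price.is_not_null())    # renders as: count(*) WHERE price IS NOT NULL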
+
+class InstrumentedField(InstrumentedExpression):
+    """Proxy object for a mapped document field.
+
+    An instance of this class is returned when a field is accessed as a class
+    attribute of a ``Document`` or ``InnerDoc`` subclass. These objects can
+    be used in any situation in which a reference to a field is required, such
+    as when specifying sort options in a search::
+
+        class MyDocument(Document):
+            name: str
+
+        s = MyDocument.search()
+        s = s.sort(-MyDocument.name)  # sort by name in descending order
+    """
+
+    def __init__(self, name: str, field: Optional[Field]):
+        super().__init__(name)
+        self._field = field
+
+    # note that the return value type here assumes classes will only be used to
+    # access fields (I haven't found a way to make this type dynamic based on a
+    # decision taken at runtime)
+    def __getattr__(self, attr: str) -> "InstrumentedField":
+        try:
+            # first let's see if this is an attribute of this object
+            return super().__getattribute__(attr)  # type: ignore[no-any-return]
+        except AttributeError:
+            if self._field:
+                try:
+                    # next we see if we have a sub-field with this name
+                    return InstrumentedField(f"{self._expr}.{attr}", self._field[attr])
+                except KeyError:
+                    # lastly we let the wrapped field resolve this attribute
+                    return getattr(self._field, attr)  # type: ignore[no-any-return]
+            else:
+                raise
+
+    def __pos__(self) -> str:  # type: ignore[override]
+        """Return the field name representation for ascending sort order"""
+        return f"{self._expr}"
+
+    def __neg__(self) -> str:  # type: ignore[override]
+        """Return the field name representation for descending sort order"""
+        return f"-{self._expr}"
+
+    def __str__(self) -> str:
+        return self._expr
+
+    def __repr__(self) -> str:
+        return f"InstrumentedField[{self._expr}]"
+
+
+class DocumentMeta(type):
+    _doc_type: "DocumentOptions"
+    _index: "IndexBase"
+
+    def __new__(
+        cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]
+    ) -> "DocumentMeta":
+        # DocumentOptions filters attrs in place
+        attrs["_doc_type"] = DocumentOptions(name, bases, attrs)
+        return super().__new__(cls, name, bases, attrs)
+
+    def __getattr__(cls, attr: str) -> Any:
+        if attr in cls._doc_type.mapping:
+            return InstrumentedField(attr, cls._doc_type.mapping[attr])
+        return super().__getattribute__(attr)
+
+
+class DocumentOptions:
+    type_annotation_map = {
+        int: (Integer, {}),
+        float: (Float, {}),
+        bool: (Boolean, {}),
+        str: (Text, {}),
+        bytes: (Binary, {}),
+        datetime: (Date, {}),
+        date: (Date, {"format": "yyyy-MM-dd"}),
+    }
+
+    def __init__(self, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]):
+        meta = attrs.pop("Meta", None)
+
+        # create the mapping instance
+        self.mapping: Mapping = getattr(meta, "mapping", Mapping())
+
+        # register the document's fields, which can be given in a few formats:
+        #
+        # class MyDocument(Document):
+        #     # required field using native typing
+        #     # (str, int, float, bool, datetime, date)
+        #     field1: str
+        #
+        #     # optional field using native typing
+        #     field2: Optional[datetime]
+        #
+        #     # array field using native typing
+        #     field3: list[int]
+        #
+        #     # sub-object, same as Object(MyInnerDoc)
+        #     field4: MyInnerDoc
+        #
+        #     # nested sub-objects, same as Nested(MyInnerDoc)
+        #     field5: list[MyInnerDoc]
+        #
+        #     # use typing, but override with any stock or custom field
+        #     field6: bool = MyCustomField()
+        #
+        #     # best mypy and pyright support and dataclass-like behavior
+        #     field7: M[date]
+        #     field8: M[str] = mapped_field(MyCustomText(), default="foo")
+        #
+        #     # legacy format without Python typing
+        #     field9 = Text()
+        #
+        #     # ignore attributes
+        #     field10: ClassVar[str] = "a regular class variable"
+        annotations = attrs.get("__annotations__", {})
+        if not annotations and annotationlib:
+            # Python 3.14+ uses annotationlib
+            annotate = annotationlib.get_annotate_from_class_namespace(attrs)
+            if annotate:
+                annotations = (
+                    annotationlib.call_annotate_function(
+                        annotate, format=annotationlib.Format.VALUE
+                    )
+                    or {}
+                )
+        fields = {n for n in attrs if isinstance(attrs[n], Field)}
+        fields.update(annotations.keys())
+        field_defaults = {}
+        for name in fields:
+            value: Any = None
+            required = None
+            multi = None
+            if name in annotations:
+                # the field has a type annotation, so next we try to figure out
+                # what field type we can use
+                type_ = annotations[name]
+                type_metadata = []
+                if isinstance(type_, _AnnotatedAlias):
+                    type_metadata = type_.__metadata__
+                    type_ = type_.__origin__
+                skip = False
+                required = True
+                multi = False
+                while hasattr(type_, "__origin__"):
+                    if type_.__origin__ == ClassVar:
+                        skip = True
+                        break
+                    elif type_.__origin__ == Mapped:
+                        # M[type] -> extract the wrapped type
+                        type_ = type_.__args__[0]
+                    elif type_.__origin__ == Union:
+                        if len(type_.__args__) == 2 and type_.__args__[1] is type(None):
+                            # Optional[type] -> mark instance as optional
+                            required = False
+                            type_ = type_.__args__[0]
+                        else:
+                            raise TypeError("Unsupported union")
+                    elif type_.__origin__ in [list, List]:
+                        # List[type] -> mark instance as multi
+                        multi = True
+                        required = False
+                        type_ = type_.__args__[0]
+                    else:
+                        break
+                if skip or type_ == ClassVar:
+                    # skip ClassVar attributes
+                    continue
+                if type(type_) is UnionType:
+                    # a union given with the pipe syntax
+                    args = get_args(type_)
+                    if len(args) == 2 and args[1] is type(None):
+                        required = False
+                        type_ = type_.__args__[0]
+                    else:
+                        raise TypeError("Unsupported union")
+                field = None
+                field_args: List[Any] = []
+                field_kwargs: Dict[str, Any] = {}
+                if isinstance(type_, type) and issubclass(type_, InnerDoc):
+                    # object or nested field
+                    field = Nested if multi else Object
+                    field_args = [type_]
+                elif type_ in self.type_annotation_map:
+                    # use best field type for the type hint provided
+                    field, field_kwargs = self.type_annotation_map[type_]  # type: ignore[assignment]
+
+                # if this field does not have a right-hand value, we look in the metadata
+                # of the annotation to see if we find it there
+                for md in type_metadata:
+                    if isinstance(md, (_FieldMetadataDict, Field)):
+                        attrs[name] = md
+
+                if field:
+                    field_kwargs = {
+                        "multi": multi,
+                        "required": required,
+                        **field_kwargs,
+                    }
+                    value = field(*field_args, **field_kwargs)
+
+            if name in attrs:
+                # this field has a right-side value, which can be field
+                # instance on its own or wrapped with mapped_field()
+                attr_value = attrs[name]
+                if isinstance(attr_value, _FieldMetadataDict):
+                    # the mapped_field() wrapper function was used so we need
+                    # to look for the field instance and also record any
+                    # dataclass-style defaults
+                    if attr_value.get("exclude"):
+                        # skip this field
+                        continue
+                    attr_value = attrs[name].get("_field")
+                    default_value = attrs[name].get("default") or attrs[name].get(
+                        "default_factory"
+                    )
+                    if default_value:
+                        field_defaults[name] = default_value
+                if isinstance(attr_value, Field):
+                    value = attr_value
+                    if required is not None:
+                        value._required = required
+                    if multi is not None:
+                        value._multi = multi
+
+            if value is None:
+                raise TypeError(f"Cannot map field {name}")
+
+            self.mapping.field(name, value)
+            if name in attrs:
+                del attrs[name]
+
+        # store dataclass-style defaults for ObjectBase.__init__ to assign
+        attrs["_defaults"] = field_defaults
+
+        # add all the mappings for meta fields
+        for name in dir(meta):
+            if isinstance(getattr(meta, name, None), MetaField):
+                params = getattr(meta, name)
+                self.mapping.meta(name, *params.args, **params.kwargs)
+
+        # document inheritance - include the fields from parents' mappings
+        for b in bases:
+            if hasattr(b, "_doc_type") and hasattr(b._doc_type, "mapping"):
+                self.mapping.update(b._doc_type.mapping, update_only=True)
+
+    @property
+    def name(self) -> str:
+        return self.mapping.properties.name
+
+
+_FieldType = TypeVar("_FieldType")
+
+
+class Mapped(Generic[_FieldType]):
+    """Class that represents the type of a mapped field.
+
+    This class can be used as an optional wrapper on a field type to help type
+    checkers assign the correct type when the field is used as a class
+    attribute.
+
+    Consider the following definitions::
+
+        class MyDocument(Document):
+            first: str
+            second: M[str]
+
+        mydoc = MyDocument(first="1", second="2")
+
+    Type checkers have no trouble inferring the type of both ``mydoc.first``
+    and ``mydoc.second`` as ``str``, but while ``MyDocument.first`` will be
+    incorrectly typed as ``str``, ``MyDocument.second`` should be assigned the
+    correct ``InstrumentedField`` type.
+    """
+
+    __slots__: Dict[str, Any] = {}
+
+    if TYPE_CHECKING:
+
+        @overload
+        def __get__(self, instance: None, owner: Any) -> InstrumentedField: ...
+
+        @overload
+        def __get__(self, instance: object, owner: Any) -> _FieldType: ...
+
+        def __get__(
+            self, instance: Optional[object], owner: Any
+        ) -> Union[InstrumentedField, _FieldType]: ...
+
+        def __set__(self, instance: Optional[object], value: _FieldType) -> None: ...
+
+        def __delete__(self, instance: Any) -> None: ...
+
+
+M = Mapped
+
+
+class _FieldMetadataDict(dict[str, Any]):
+    """This class is used to identify metadata returned by the `mapped_field()` function."""
+
+    pass
+
+
+def mapped_field(
+    field: Optional[Field] = None,
+    *,
+    init: bool = True,
+    default: Any = None,
+    default_factory: Optional[Callable[[], Any]] = None,
+    exclude: bool = False,
+    **kwargs: Any,
+) -> Any:
+    """Construct a field using dataclass behaviors
+
+    This function can be used in the right side of a document field definition
+    as a wrapper for the field instance or as a way to provide dataclass-compatible
+    options.
+
+    :param field: The instance of ``Field`` to use for this field. If not provided,
+        an instance that is appropriate for the type given to the field is used.
+    :param init: a value of ``True`` adds this field to the constructor, and a
+        value of ``False`` omits it from it. The default is ``True``.
+    :param default: a default value to use for this field when one is not provided
+        explicitly.
+    :param default_factory: a callable that returns a default value for the field,
+        when one isn't provided explicitly. Only one of ``default`` and
+        ``default_factory`` can be used.
+    :param exclude: Set to ``True`` to exclude this field from the Elasticsearch
+        index.
+    """
+    return _FieldMetadataDict(
+        _field=field,
+        init=init,
+        default=default,
+        default_factory=default_factory,
+        exclude=exclude,
+        **kwargs,
+    )
+
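A brief sketch of typed field declarations combining ``M[...]`` with ``mapped_field()``; it assumes the usual public exports from ``elasticsearch.dsl``:

    from datetime import datetime
    from elasticsearch.dsl import Document, Keyword, M, mapped_field

    class BlogPost(Document):
        title: M[str]                                              # required Text field inferred from the annotation
        status: M[str] = mapped_field(Keyword(), default="draft")  # override the inferred field type
        created: M[datetime] = mapped_field(default_factory=datetime.now)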
+
+@dataclass_transform(field_specifiers=(mapped_field,))
+class InnerDoc(ObjectBase, metaclass=DocumentMeta):
+    """
+    Common class for inner documents like Object or Nested
+    """
+
+    @classmethod
+    def from_es(
+        cls,
+        data: Union[Dict[str, Any], "ObjectApiResponse[Any]"],
+        data_only: bool = False,
+    ) -> "InnerDoc":
+        if data_only:
+            data = {"_source": data}
+        return super().from_es(data)
+
+
+class DocumentBase(ObjectBase):
+    """
+    Model-like class for persisting documents in elasticsearch.
+    """
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        if cls._index._name is None:
+            return True
+        return fnmatch(hit.get("_index", ""), cls._index._name)
+
+    @classmethod
+    def _default_index(cls, index: Optional[str] = None) -> str:
+        return index or cls._index._name
+
+    def _get_index(
+        self, index: Optional[str] = None, required: bool = True
+    ) -> Optional[str]:
+        if index is None:
+            index = getattr(self.meta, "index", None)
+        if index is None:
+            index = getattr(self._index, "_name", None)
+        if index is None and required:
+            raise ValidationException("No index")
+        if index and "*" in index:
+            raise ValidationException("You cannot write to a wildcard index.")
+        return index
+
+    def __repr__(self) -> str:
+        return "{}({})".format(
+            self.__class__.__name__,
+            ", ".join(
+                f"{key}={getattr(self.meta, key)!r}"
+                for key in ("index", "id")
+                if key in self.meta
+            ),
+        )
+
+    def to_dict(self, include_meta: bool = False, skip_empty: bool = True) -> Dict[str, Any]:  # type: ignore[override]
+        """
+        Serialize the instance into a dictionary so that it can be saved in elasticsearch.
+
+        :arg include_meta: if set to ``True`` will include all the metadata
+            (``_index``, ``_id`` etc). Otherwise just the document's
+            data is serialized. This is useful when passing multiple instances into
+            ``elasticsearch.helpers.bulk``.
+        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
+            ``[]``, ``{}``) to be left on the document. Those values will be
+            stripped out otherwise as they make no difference in elasticsearch.
+        """
+        d = super().to_dict(skip_empty=skip_empty)
+        if not include_meta:
+            return d
+
+        meta = {"_" + k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta}
+
+        # in case of to_dict include the index unlike save/update/delete
+        index = self._get_index(required=False)
+        if index is not None:
+            meta["_index"] = index
+
+        meta["_source"] = d
+        return meta
+
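For example (a sketch; ``client`` and ``docs`` are placeholders), documents serialized with ``include_meta=True`` can be fed straight into the bulk helper:

    from elasticsearch.helpers import bulk

    # "client" is an Elasticsearch client, "docs" an iterable of Document instances
    bulk(client, (doc.to_dict(include_meta=True) for doc in docs))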
+    @classmethod
+    def _get_field_names(
+        cls, for_esql: bool = False, nested_class: Optional[type[InnerDoc]] = None
+    ) -> List[str]:
+        """Return the list of field names used by this document.
+        If the document has nested objects, their fields are reported using dot
+        notation. If the ``for_esql`` argument is set to ``True``, the list omits
+        nested fields, which are currently unsupported in ES|QL.
+        """
+        fields = []
+        class_ = nested_class or cls
+        for field_name in class_._doc_type.mapping:
+            field = class_._doc_type.mapping[field_name]
+            if isinstance(field, Object):
+                if for_esql and isinstance(field, Nested):
+                    # ES|QL does not recognize Nested fields at this time
+                    continue
+                sub_fields = cls._get_field_names(
+                    for_esql=for_esql, nested_class=field._doc_class
+                )
+                for sub_field in sub_fields:
+                    fields.append(f"{field_name}.{sub_field}")
+            else:
+                fields.append(field_name)
+        return fields
+
+    @classmethod
+    def esql_from(cls) -> "ESQLBase":
+        """Return a base ES|QL query for instances of this document class.
+
+        The returned query is initialized with ``FROM`` and ``KEEP`` statements,
+        and can be completed as desired.
+        """
+        from ..esql import ESQL  # here to avoid circular imports
+
+        return (
+            ESQL.from_(cls)
+            .metadata("_id")
+            .keep("_id", *tuple(cls._get_field_names(for_esql=True)))
+        )
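A sketch of what ``esql_from()`` provides; ``Article`` and its index name are hypothetical, and the rendered query is shown only approximately:

    from elasticsearch.dsl import Document

    class Article(Document):
        title: str
        body: str

        class Index:
            name = "articles"

    query = Article.esql_from()
    # roughly: FROM articles METADATA _id | KEEP _id, title, body

The returned builder can then be extended with further ES|QL processing commands before being executed against a client.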
diff -pruN 8.17.2-2/elasticsearch/dsl/exceptions.py 9.2.0-1/elasticsearch/dsl/exceptions.py
--- 8.17.2-2/elasticsearch/dsl/exceptions.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/exceptions.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,32 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+class ElasticsearchDslException(Exception):
+    pass
+
+
+class UnknownDslObject(ElasticsearchDslException):
+    pass
+
+
+class ValidationException(ValueError, ElasticsearchDslException):
+    pass
+
+
+class IllegalOperation(ElasticsearchDslException):
+    pass
diff -pruN 8.17.2-2/elasticsearch/dsl/faceted_search.py 9.2.0-1/elasticsearch/dsl/faceted_search.py
--- 8.17.2-2/elasticsearch/dsl/faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,28 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ._async.faceted_search import AsyncFacetedSearch  # noqa: F401
+from ._sync.faceted_search import FacetedSearch  # noqa: F401
+from .faceted_search_base import (  # noqa: F401
+    DateHistogramFacet,
+    Facet,
+    FacetedResponse,
+    HistogramFacet,
+    NestedFacet,
+    RangeFacet,
+    TermsFacet,
+)
diff -pruN 8.17.2-2/elasticsearch/dsl/faceted_search_base.py 9.2.0-1/elasticsearch/dsl/faceted_search_base.py
--- 8.17.2-2/elasticsearch/dsl/faceted_search_base.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/faceted_search_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,489 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime, timedelta
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from typing_extensions import Self
+
+from .aggs import A, Agg
+from .query import MatchAll, Nested, Query, Range, Terms
+from .response import Response
+from .utils import _R, AttrDict
+
+if TYPE_CHECKING:
+    from .document_base import DocumentBase
+    from .response.aggs import BucketData
+    from .search_base import SearchBase
+
+FilterValueType = Union[str, int, float, bool]
+
+__all__ = [
+    "FacetedSearchBase",
+    "HistogramFacet",
+    "TermsFacet",
+    "DateHistogramFacet",
+    "RangeFacet",
+    "NestedFacet",
+]
+
+
+class Facet(Generic[_R]):
+    """
+    A facet in a faceted search. Wraps an aggregation and provides functionality
+    to create a filter for selected values and return a list of facet values
+    from the result of the aggregation.
+    """
+
+    agg_type: str = ""
+
+    def __init__(
+        self, metric: Optional[Agg[_R]] = None, metric_sort: str = "desc", **kwargs: Any
+    ):
+        self.filter_values = ()
+        self._params = kwargs
+        self._metric = metric
+        if metric and metric_sort:
+            self._params["order"] = {"metric": metric_sort}
+
+    def get_aggregation(self) -> Agg[_R]:
+        """
+        Return the aggregation object.
+        """
+        agg: Agg[_R] = A(self.agg_type, **self._params)
+        if self._metric:
+            agg.metric("metric", self._metric)
+        return agg
+
+    def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]:
+        """
+        Construct a filter.
+        """
+        if not filter_values:
+            return None
+
+        f = self.get_value_filter(filter_values[0])
+        for v in filter_values[1:]:
+            f |= self.get_value_filter(v)
+        return f
+
+    def get_value_filter(self, filter_value: FilterValueType) -> Query:  # type: ignore[empty-body]
+        """
+        Construct a filter for an individual value
+        """
+        pass
+
+    def is_filtered(self, key: str, filter_values: List[FilterValueType]) -> bool:
+        """
+        Return whether a filter is active for the given key.
+        """
+        return key in filter_values
+
+    def get_value(self, bucket: "BucketData[_R]") -> Any:
+        """
+        Return a value representing a bucket, its key by default.
+        """
+        return bucket["key"]
+
+    def get_metric(self, bucket: "BucketData[_R]") -> int:
+        """
+        Return the metric for a bucket, ``doc_count`` by default.
+        """
+        if self._metric:
+            return cast(int, bucket["metric"]["value"])
+        return cast(int, bucket["doc_count"])
+
+    def get_values(
+        self, data: "BucketData[_R]", filter_values: List[FilterValueType]
+    ) -> List[Tuple[Any, int, bool]]:
+        """
+        Turn the raw bucket data into a list of tuples containing the key,
+        number of documents and a flag indicating whether this value has been
+        selected or not.
+        """
+        out = []
+        for bucket in data.buckets:
+            b = cast("BucketData[_R]", bucket)
+            key = self.get_value(b)
+            out.append((key, self.get_metric(b), self.is_filtered(key, filter_values)))
+        return out
+
+
+class TermsFacet(Facet[_R]):
+    agg_type = "terms"
+
+    def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]:
+        """Create a terms filter instead of bool containing term filters."""
+        if filter_values:
+            return Terms(self._params["field"], filter_values, _expand__to_dot=False)
+        return None
+
+
+class RangeFacet(Facet[_R]):
+    agg_type = "range"
+
+    def _range_to_dict(
+        self, range: Tuple[Any, Tuple[Optional[int], Optional[int]]]
+    ) -> Dict[str, Any]:
+        key, _range = range
+        out: Dict[str, Any] = {"key": key}
+        if _range[0] is not None:
+            out["from"] = _range[0]
+        if _range[1] is not None:
+            out["to"] = _range[1]
+        return out
+
+    def __init__(
+        self,
+        ranges: Sequence[Tuple[Any, Tuple[Optional[int], Optional[int]]]],
+        **kwargs: Any,
+    ):
+        super().__init__(**kwargs)
+        self._params["ranges"] = list(map(self._range_to_dict, ranges))
+        self._params["keyed"] = False
+        self._ranges = dict(ranges)
+
+    def get_value_filter(self, filter_value: FilterValueType) -> Query:
+        f, t = self._ranges[filter_value]
+        limits: Dict[str, Any] = {}
+        if f is not None:
+            limits["gte"] = f
+        if t is not None:
+            limits["lt"] = t
+
+        return Range(self._params["field"], limits, _expand__to_dot=False)
+
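A small sketch of a range facet definition (field name illustrative): each range is a ``(key, (from, to))`` tuple, and ``None`` leaves that end open:

    from elasticsearch.dsl.faceted_search import RangeFacet

    price = RangeFacet(
        field="price",
        ranges=[
            ("cheap", (None, 50)),
            ("mid", (50, 200)),
            ("expensive", (200, None)),
        ],
    )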
+
+class HistogramFacet(Facet[_R]):
+    agg_type = "histogram"
+
+    def get_value_filter(self, filter_value: FilterValueType) -> Range:
+        return Range(
+            self._params["field"],
+            {
+                "gte": filter_value,
+                "lt": filter_value + self._params["interval"],
+            },
+            _expand__to_dot=False,
+        )
+
+
+def _date_interval_year(d: datetime) -> datetime:
+    return d.replace(
+        year=d.year + 1, day=(28 if d.month == 2 and d.day == 29 else d.day)
+    )
+
+
+def _date_interval_month(d: datetime) -> datetime:
+    return (d + timedelta(days=32)).replace(day=1)
+
+
+def _date_interval_week(d: datetime) -> datetime:
+    return d + timedelta(days=7)
+
+
+def _date_interval_day(d: datetime) -> datetime:
+    return d + timedelta(days=1)
+
+
+def _date_interval_hour(d: datetime) -> datetime:
+    return d + timedelta(hours=1)
+
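These helpers compute the exclusive upper bound of a ``date_histogram`` bucket from the bucket's start; a couple of worked examples:

    from datetime import datetime

    from elasticsearch.dsl.faceted_search_base import (
        _date_interval_month,
        _date_interval_week,
        _date_interval_year,
    )

    assert _date_interval_month(datetime(2024, 1, 1)) == datetime(2024, 2, 1)
    assert _date_interval_week(datetime(2024, 1, 1)) == datetime(2024, 1, 8)
    assert _date_interval_year(datetime(2024, 2, 29)) == datetime(2025, 2, 28)  # leap day clamps to Feb 28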
+
+class DateHistogramFacet(Facet[_R]):
+    agg_type = "date_histogram"
+
+    DATE_INTERVALS = {
+        "year": _date_interval_year,
+        "1Y": _date_interval_year,
+        "month": _date_interval_month,
+        "1M": _date_interval_month,
+        "week": _date_interval_week,
+        "1w": _date_interval_week,
+        "day": _date_interval_day,
+        "1d": _date_interval_day,
+        "hour": _date_interval_hour,
+        "1h": _date_interval_hour,
+    }
+
+    def __init__(self, **kwargs: Any):
+        kwargs.setdefault("min_doc_count", 0)
+        super().__init__(**kwargs)
+
+    def get_value(self, bucket: "BucketData[_R]") -> Any:
+        if not isinstance(bucket["key"], datetime):
+            # Elasticsearch returns key=None instead of 0 for date 1970-01-01,
+            # so we need to set key to 0 to avoid TypeError exception
+            if bucket["key"] is None:
+                bucket["key"] = 0
+            # Preserve milliseconds in the datetime
+            return datetime.utcfromtimestamp(int(cast(int, bucket["key"])) / 1000.0)
+        else:
+            return bucket["key"]
+
+    def get_value_filter(self, filter_value: Any) -> Range:
+        for interval_type in ("calendar_interval", "fixed_interval"):
+            if interval_type in self._params:
+                break
+        else:
+            interval_type = "interval"
+
+        return Range(
+            self._params["field"],
+            {
+                "gte": filter_value,
+                "lt": self.DATE_INTERVALS[self._params[interval_type]](filter_value),
+            },
+            _expand__to_dot=False,
+        )
+
+
+class NestedFacet(Facet[_R]):
+    agg_type = "nested"
+
+    def __init__(self, path: str, nested_facet: Facet[_R]):
+        self._path = path
+        self._inner = nested_facet
+        super().__init__(path=path, aggs={"inner": nested_facet.get_aggregation()})
+
+    def get_values(
+        self, data: "BucketData[_R]", filter_values: List[FilterValueType]
+    ) -> List[Tuple[Any, int, bool]]:
+        return self._inner.get_values(data.inner, filter_values)
+
+    def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]:
+        inner_q = self._inner.add_filter(filter_values)
+        if inner_q:
+            return Nested(path=self._path, query=inner_q)
+        return None
+
+
+class FacetedResponse(Response[_R]):
+    if TYPE_CHECKING:
+        _faceted_search: "FacetedSearchBase[_R]"
+        _facets: Dict[str, List[Tuple[Any, int, bool]]]
+
+    @property
+    def query_string(self) -> Optional[Union[str, Query]]:
+        return self._faceted_search._query
+
+    @property
+    def facets(self) -> Dict[str, List[Tuple[Any, int, bool]]]:
+        if not hasattr(self, "_facets"):
+            super(AttrDict, self).__setattr__("_facets", AttrDict({}))
+            for name, facet in self._faceted_search.facets.items():
+                self._facets[name] = facet.get_values(
+                    getattr(getattr(self.aggregations, "_filter_" + name), name),
+                    self._faceted_search.filter_values.get(name, []),
+                )
+        return self._facets
+
+
+class FacetedSearchBase(Generic[_R]):
+    """
+    Abstraction for creating faceted navigation searches that takes care of
+    composing the queries, aggregations and filters as needed as well as
+    presenting the results in an easy-to-consume fashion::
+
+        class BlogSearch(FacetedSearch):
+            index = 'blogs'
+            doc_types = [Blog, Post]
+            fields = ['title^5', 'category', 'description', 'body']
+
+            facets = {
+                'type': TermsFacet(field='_type'),
+                'category': TermsFacet(field='category'),
+                'weekly_posts': DateHistogramFacet(field='published_from', interval='week')
+            }
+
+            def search(self):
+                ' Override search to add your own filters '
+                s = super(BlogSearch, self).search()
+                return s.filter('term', published=True)
+
+        # when using:
+        blog_search = BlogSearch("web framework", filters={"category": "python"})
+
+        # supports pagination
+        blog_search[10:20]
+
+        response = blog_search.execute()
+
+        # easy access to aggregation results:
+        for category, hit_count, is_selected in response.facets.category:
+            print(
+                "Category %s has %d hits%s." % (
+                    category,
+                    hit_count,
+                    ' and is chosen' if is_selected else ''
+                )
+            )
+
+    """
+
+    index: Optional[str] = None
+    doc_types: Optional[List[Union[str, Type["DocumentBase"]]]] = None
+    fields: Sequence[str] = []
+    facets: Dict[str, Facet[_R]] = {}
+    using = "default"
+
+    if TYPE_CHECKING:
+
+        def search(self) -> "SearchBase[_R]": ...
+
+    def __init__(
+        self,
+        query: Optional[Union[str, Query]] = None,
+        filters: Dict[str, FilterValueType] = {},
+        sort: Sequence[str] = [],
+    ):
+        """
+        :arg query: the text to search for
+        :arg filters: facet values to filter
+        :arg sort: sort information to be passed to :class:`~elasticsearch.dsl.Search`
+        """
+        self._query = query
+        self._filters: Dict[str, Query] = {}
+        self._sort = sort
+        self.filter_values: Dict[str, List[FilterValueType]] = {}
+        for name, value in filters.items():
+            self.add_filter(name, value)
+
+        self._s = self.build_search()
+
+    def __getitem__(self, k: Union[int, slice]) -> Self:
+        self._s = self._s[k]
+        return self
+
+    def add_filter(
+        self, name: str, filter_values: Union[FilterValueType, List[FilterValueType]]
+    ) -> None:
+        """
+        Add a filter for a facet.
+        """
+        # normalize the value into a list
+        if not isinstance(filter_values, (tuple, list)):
+            if filter_values is None:
+                return
+            filter_values = [
+                filter_values,
+            ]
+
+        # remember the filter values for use in FacetedResponse
+        self.filter_values[name] = filter_values
+
+        # get the filter from the facet
+        f = self.facets[name].add_filter(filter_values)
+        if f is None:
+            return
+
+        self._filters[name] = f
+
+    def query(
+        self, search: "SearchBase[_R]", query: Union[str, Query]
+    ) -> "SearchBase[_R]":
+        """
+        Add query part to ``search``.
+
+        Override this if you wish to customize the query used.
+        """
+        if query:
+            if self.fields:
+                return search.query("multi_match", fields=self.fields, query=query)
+            else:
+                return search.query("multi_match", query=query)
+        return search
+
+    def aggregate(self, search: "SearchBase[_R]") -> None:
+        """
+        Add aggregations representing the facets selected, including potential
+        filters.
+        """
+        for f, facet in self.facets.items():
+            agg = facet.get_aggregation()
+            agg_filter: Query = MatchAll()
+            for field, filter in self._filters.items():
+                if f == field:
+                    continue
+                agg_filter &= filter
+            search.aggs.bucket("_filter_" + f, "filter", filter=agg_filter).bucket(
+                f, agg
+            )
+
+    def filter(self, search: "SearchBase[_R]") -> "SearchBase[_R]":
+        """
+        Add a ``post_filter`` to the search request narrowing the results based
+        on the facet filters.
+        """
+        if not self._filters:
+            return search
+
+        post_filter: Query = MatchAll()
+        for f in self._filters.values():
+            post_filter &= f
+        return search.post_filter(post_filter)
+
+    def highlight(self, search: "SearchBase[_R]") -> "SearchBase[_R]":
+        """
+        Add highlighting for all the fields
+        """
+        return search.highlight(
+            *(f if "^" not in f else f.split("^", 1)[0] for f in self.fields)
+        )
+
+    def sort(self, search: "SearchBase[_R]") -> "SearchBase[_R]":
+        """
+        Add sorting information to the request.
+        """
+        if self._sort:
+            search = search.sort(*self._sort)
+        return search
+
+    def params(self, **kwargs: Any) -> None:
+        """
+        Specify query params to be used when executing the search. All the
+        keyword arguments will override the current values. See
+        https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.search
+        for all available parameters.
+        """
+        self._s = self._s.params(**kwargs)
+
+    def build_search(self) -> "SearchBase[_R]":
+        """
+        Construct the ``Search`` object.
+        """
+        s = self.search()
+        if self._query is not None:
+            s = self.query(s, self._query)
+        s = self.filter(s)
+        if self.fields:
+            s = self.highlight(s)
+        s = self.sort(s)
+        self.aggregate(s)
+        return s
diff -pruN 8.17.2-2/elasticsearch/dsl/field.py 9.2.0-1/elasticsearch/dsl/field.py
--- 8.17.2-2/elasticsearch/dsl/field.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/field.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,4627 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import base64
+import collections.abc
+import ipaddress
+from copy import deepcopy
+from datetime import date, datetime
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    Iterator,
+    Literal,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from dateutil import parser, tz
+from elastic_transport.client_utils import DEFAULT, DefaultType
+
+from .exceptions import ValidationException
+from .query import Q
+from .utils import AttrDict, AttrList, DslBase
+from .wrappers import Range
+
+if TYPE_CHECKING:
+    from datetime import tzinfo
+    from ipaddress import IPv4Address, IPv6Address
+
+    from _operator import _SupportsComparison
+
+    from . import types
+    from .document import InnerDoc
+    from .document_base import InstrumentedField
+    from .mapping_base import MappingBase
+    from .query import Query
+
+unicode = str
+
+
+def construct_field(
+    name_or_field: Union[
+        str,
+        "Field",
+        Dict[str, Any],
+    ],
+    **params: Any,
+) -> "Field":
+    # {"type": "text", "analyzer": "snowball"}
+    if isinstance(name_or_field, collections.abc.Mapping):
+        if params:
+            raise ValueError(
+                "construct_field() cannot accept parameters when passing in a dict."
+            )
+        params = deepcopy(name_or_field)
+        if "type" not in params:
+            # inner object can be implicitly defined
+            if "properties" in params:
+                name = "object"
+            else:
+                raise ValueError('construct_field() needs to have a "type" key.')
+        else:
+            name = params.pop("type")
+        return Field.get_dsl_class(name)(**params)
+
+    # Text()
+    if isinstance(name_or_field, Field):
+        if params:
+            raise ValueError(
+                "construct_field() cannot accept parameters "
+                "when passing in a construct_field object."
+            )
+        return name_or_field
+
+    # "text", analyzer="snowball"
+    return Field.get_dsl_class(name_or_field)(**params)
+
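For illustration, the three accepted input forms produce equivalent field objects (``Text`` is one of the field classes defined further down in this module):

    from elasticsearch.dsl import Text
    from elasticsearch.dsl.field import construct_field

    f1 = construct_field({"type": "text", "analyzer": "snowball"})
    f2 = construct_field("text", analyzer="snowball")
    f3 = construct_field(Text(analyzer="snowball"))  # an existing Field instance is returned unchanged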
+
+class Field(DslBase):
+    _type_name = "field"
+    _type_shortcut = staticmethod(construct_field)
+    # all fields can be multifields
+    _param_defs = {"fields": {"type": "field", "hash": True}}
+    name = ""
+    _coerce = False
+
+    def __init__(
+        self, multi: bool = False, required: bool = False, *args: Any, **kwargs: Any
+    ):
+        """
+        :arg bool multi: specifies whether field can contain array of values
+        :arg bool required: specifies whether field is required
+        """
+        self._multi = multi
+        self._required = required
+        super().__init__(*args, **kwargs)
+
+    def __getitem__(self, subfield: str) -> "Field":
+        return cast(Field, self._params.get("fields", {})[subfield])
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Any:
+        return data
+
+    def _safe_serialize(self, data: Any, skip_empty: bool) -> Any:
+        try:
+            return self._serialize(data, skip_empty)
+        except TypeError:
+            # older method signature, without skip_empty
+            return self._serialize(data)  # type: ignore[call-arg]
+
+    def _deserialize(self, data: Any) -> Any:
+        return data
+
+    def _empty(self) -> Optional[Any]:
+        return None
+
+    def empty(self) -> Optional[Any]:
+        if self._multi:
+            return AttrList([])
+        return self._empty()
+
+    def serialize(self, data: Any, skip_empty: bool = True) -> Any:
+        if isinstance(data, (list, AttrList, tuple)):
+            return list(
+                map(
+                    self._safe_serialize,
+                    cast(Iterable[Any], data),
+                    [skip_empty] * len(data),
+                )
+            )
+        return self._safe_serialize(data, skip_empty)
+
+    def deserialize(self, data: Any) -> Any:
+        if isinstance(data, (list, AttrList, tuple)):
+            data = [
+                None if d is None else self._deserialize(d)
+                for d in cast(Iterable[Any], data)
+            ]
+            return data
+        if data is None:
+            return None
+        return self._deserialize(data)
+
+    def clean(self, data: Any) -> Any:
+        if data is not None:
+            data = self.deserialize(data)
+        if data in (None, [], {}) and self._required:
+            raise ValidationException("Value required for this field.")
+        return data
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        name, value = cast(Tuple[str, Dict[str, Any]], d.popitem())
+        value["type"] = name
+        return value
+
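A short sketch of the serialization and validation behaviour above, using the ``Integer`` field defined later in this module:

    from elasticsearch.dsl import Integer
    from elasticsearch.dsl.exceptions import ValidationException

    f = Integer(required=True, multi=True)

    assert list(f.empty()) == []              # multi fields default to an empty AttrList
    assert f.deserialize(["1", 2]) == [1, 2]  # per-item coercion via _deserialize()

    try:
        f.clean([])                           # an empty value on a required field fails validation
    except ValidationException:
        pass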
+
+class CustomField(Field):
+    name = "custom"
+    _coerce = True
+
+    def to_dict(self) -> Dict[str, Any]:
+        if isinstance(self.builtin_type, Field):
+            return self.builtin_type.to_dict()
+
+        d = super().to_dict()
+        d["type"] = self.builtin_type
+        return d
+
+
+class RangeField(Field):
+    _coerce = True
+    _core_field: Optional[Field] = None
+
+    def _deserialize(self, data: Any) -> Range["_SupportsComparison"]:
+        if isinstance(data, Range):
+            return data
+        data = {k: self._core_field.deserialize(v) for k, v in data.items()}  # type: ignore[union-attr]
+        return Range(data)
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]:
+        if data is None:
+            return None
+        if not isinstance(data, collections.abc.Mapping):
+            data = data.to_dict()
+        return {k: self._core_field.serialize(v) for k, v in data.items()}  # type: ignore[union-attr]
+
+
+class Float(Field):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric. Defaults to false.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "float"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> float:
+        return float(data)
+
+
+class Integer(Field):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric. Defaults to false.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "integer"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[int, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> int:
+        return int(data)
+
+
+class Object(Field):
+    """
+    :arg doc_class: base doc class that handles mapping.
+       If no `doc_class` is provided, a new `InnerDoc` subclass is created,
+       populated with `properties` and used. Cannot be provided together with `properties`.
+    :arg enabled:
+    :arg subobjects:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "object"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        doc_class: Union[Type["InnerDoc"], "DefaultType"] = DEFAULT,
+        *args: Any,
+        enabled: Union[bool, "DefaultType"] = DEFAULT,
+        subobjects: Union[
+            Literal["true", "false", "auto"], bool, "DefaultType"
+        ] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if enabled is not DEFAULT:
+            kwargs["enabled"] = enabled
+        if subobjects is not DEFAULT:
+            kwargs["subobjects"] = subobjects
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+
+        if doc_class is not DEFAULT and (
+            properties is not DEFAULT or dynamic is not DEFAULT
+        ):
+            raise ValidationException(
+                "doc_class and properties/dynamic should not be provided together"
+            )
+        if doc_class is not DEFAULT:
+            self._doc_class: Type["InnerDoc"] = doc_class
+        else:
+            # FIXME import
+            from .document import InnerDoc
+
+            # no InnerDoc subclass, creating one instead...
+            self._doc_class = type("InnerDoc", (InnerDoc,), {})
+            for name, field in (
+                properties if properties is not DEFAULT else {}
+            ).items():
+                self._doc_class._doc_type.mapping.field(name, field)
+            if "properties" in kwargs:
+                del kwargs["properties"]
+            if dynamic is not DEFAULT:
+                self._doc_class._doc_type.mapping.meta("dynamic", dynamic)
+
+        self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping)
+        super().__init__(**kwargs)
+
+    def __getitem__(self, name: str) -> Field:
+        return self._mapping[name]
+
+    def __contains__(self, name: str) -> bool:
+        return name in self._mapping
+
+    def _empty(self) -> "InnerDoc":
+        return self._wrap({})
+
+    def _wrap(self, data: Dict[str, Any]) -> "InnerDoc":
+        return self._doc_class.from_es(data, data_only=True)
+
+    def empty(self) -> Union["InnerDoc", AttrList[Any]]:
+        if self._multi:
+            return AttrList[Any]([], self._wrap)
+        return self._empty()
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = self._mapping.to_dict()
+        d.update(super().to_dict())
+        return d
+
+    def _collect_fields(self) -> Iterator[Field]:
+        return self._mapping.properties._collect_fields()
+
+    def _deserialize(self, data: Any) -> "InnerDoc":
+        # don't wrap already wrapped data
+        if isinstance(data, self._doc_class):
+            return data
+
+        if isinstance(data, AttrDict):
+            data = data._d_
+
+        return self._wrap(data)
+
+    def _serialize(
+        self, data: Optional[Union[Dict[str, Any], "InnerDoc"]], skip_empty: bool
+    ) -> Optional[Dict[str, Any]]:
+        if data is None:
+            return None
+
+        # somebody assigned raw dict to the field, we should tolerate that
+        if isinstance(data, collections.abc.Mapping):
+            return data
+
+        try:
+            return data.to_dict(skip_empty=skip_empty)
+        except TypeError:
+            # this would only happen if an AttrDict was given instead of an InnerDoc
+            return data.to_dict()
+
+    def clean(self, data: Any) -> Any:
+        data = super().clean(data)
+        if data is None:
+            return None
+        if isinstance(data, (list, AttrList)):
+            for d in cast(Iterator["InnerDoc"], data):
+                d.full_clean()
+        else:
+            data.full_clean()
+        return data
+
+    def update(self, other: Any, update_only: bool = False) -> None:
+        if not isinstance(other, Object):
+            # not an inner/nested object, no merge possible
+            return
+
+        self._mapping.update(other._mapping, update_only)
+
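+# Illustrative sketch only (not part of the generated module): assuming the
+# InnerDoc subclass is passed as the first argument, an Object field wraps raw
+# dicts into that class on deserialization and copies the class mapping, so
+# membership checks work against the inner document's fields. `Comment` below
+# is a hypothetical example class.
+#
+#   class Comment(InnerDoc):
+#       published = Boolean()
+#       created_at = Date()
+#
+#   f = Object(Comment)
+#   c = f.deserialize({"published": "false", "created_at": "2024-01-01"})
+#   assert isinstance(c, Comment)   # wrapped via _wrap() / from_es()
+#   assert "published" in f         # __contains__ checks the copied mapping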
+
+class AggregateMetricDouble(Field):
+    """
+    :arg default_metric: (required)
+    :arg metrics: (required)
+    :arg ignore_malformed:
+    :arg time_series_metric:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "aggregate_metric_double"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        default_metric: Union[str, "DefaultType"] = DEFAULT,
+        metrics: Union[Sequence[str], "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if default_metric is not DEFAULT:
+            kwargs["default_metric"] = default_metric
+        if metrics is not DEFAULT:
+            kwargs["metrics"] = metrics
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
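+# Illustrative sketch only: the two required arguments describe which
+# pre-aggregated metrics the field stores and which one plain queries read by
+# default; the metric names below are the usual aggregate_metric_double set,
+# shown here as a hypothetical mapping.
+#
+#   m = AggregateMetricDouble(
+#       default_metric="max",
+#       metrics=["min", "max", "sum", "value_count"],
+#   )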
+
+class Alias(Field):
+    """
+    :arg path:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "alias"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if path is not DEFAULT:
+            kwargs["path"] = str(path)
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
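+# Illustrative sketch only: `path` may be a plain string or an InstrumentedField
+# taken from a Document class; either way the constructor stores str(path), so
+# the mapping refers to the target field by name. "user.name" below is a
+# hypothetical target.
+#
+#   Alias(path="user.name")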
+
+class Binary(Field):
+    """
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "binary"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+    def clean(self, data: str) -> str:
+        # Binary fields are opaque, so there's not much cleaning
+        # that can be done.
+        return data
+
+    def _deserialize(self, data: Any) -> bytes:
+        return base64.b64decode(data)
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]:
+        if data is None:
+            return None
+        return base64.b64encode(data).decode()
+
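+# Illustrative sketch only: Binary round-trips bytes through base64 strings, so
+# Elasticsearch stores encoded text while Python code works with bytes.
+#
+#   b = Binary()
+#   assert b._serialize(b"hello", skip_empty=True) == "aGVsbG8="
+#   assert b._deserialize("aGVsbG8=") == b"hello"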
+
+class Boolean(Field):
+    """
+    :arg boost:
+    :arg fielddata:
+    :arg index:
+    :arg null_value:
+    :arg ignore_malformed:
+    :arg script:
+    :arg on_script_error:
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "boolean"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        fielddata: Union[
+            "types.NumericFielddata", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if fielddata is not DEFAULT:
+            kwargs["fielddata"] = fielddata
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> bool:
+        if data == "false":
+            return False
+        return bool(data)
+
+    def clean(self, data: Any) -> Optional[bool]:
+        if data is not None:
+            data = self.deserialize(data)
+        if data is None and self._required:
+            raise ValidationException("Value required for this field.")
+        return data  # type: ignore[no-any-return]
+
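+# Illustrative sketch only: the string "false" needs special handling because
+# bool("false") would otherwise be truthy; everything else falls back to bool().
+#
+#   f = Boolean()
+#   assert f._deserialize("false") is False
+#   assert f._deserialize("true") is True
+#   assert f._deserialize(0) is False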
+
+class Byte(Integer):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "byte"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Completion(Field):
+    """
+    :arg analyzer:
+    :arg contexts:
+    :arg max_input_length:
+    :arg preserve_position_increments:
+    :arg preserve_separators:
+    :arg search_analyzer:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "completion"
+    _param_defs = {
+        "analyzer": {"type": "analyzer"},
+        "search_analyzer": {"type": "analyzer"},
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        contexts: Union[
+            Sequence["types.SuggestContext"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        max_input_length: Union[int, "DefaultType"] = DEFAULT,
+        preserve_position_increments: Union[bool, "DefaultType"] = DEFAULT,
+        preserve_separators: Union[bool, "DefaultType"] = DEFAULT,
+        search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if contexts is not DEFAULT:
+            kwargs["contexts"] = contexts
+        if max_input_length is not DEFAULT:
+            kwargs["max_input_length"] = max_input_length
+        if preserve_position_increments is not DEFAULT:
+            kwargs["preserve_position_increments"] = preserve_position_increments
+        if preserve_separators is not DEFAULT:
+            kwargs["preserve_separators"] = preserve_separators
+        if search_analyzer is not DEFAULT:
+            kwargs["search_analyzer"] = search_analyzer
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class ConstantKeyword(Field):
+    """
+    :arg value:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "constant_keyword"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        value: Any = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class CountedKeyword(Field):
+    """
+    :arg index:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "counted_keyword"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Date(Field):
+    """
+    :arg default_timezone: timezone that will be automatically used for
+        tz-naive values. May be an instance of `datetime.tzinfo` or a string
+        containing a TZ offset.
+    :arg boost:
+    :arg fielddata:
+    :arg format:
+    :arg ignore_malformed:
+    :arg index:
+    :arg script:
+    :arg on_script_error:
+    :arg null_value:
+    :arg precision_step:
+    :arg locale:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "date"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        default_timezone: Union[str, "tzinfo", "DefaultType"] = DEFAULT,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        fielddata: Union[
+            "types.NumericFielddata", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        null_value: Any = DEFAULT,
+        precision_step: Union[int, "DefaultType"] = DEFAULT,
+        locale: Union[str, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if fielddata is not DEFAULT:
+            kwargs["fielddata"] = fielddata
+        if format is not DEFAULT:
+            kwargs["format"] = format
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if precision_step is not DEFAULT:
+            kwargs["precision_step"] = precision_step
+        if locale is not DEFAULT:
+            kwargs["locale"] = locale
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+
+        if default_timezone is DEFAULT:
+            self._default_timezone = None
+        elif isinstance(default_timezone, str):
+            self._default_timezone = tz.gettz(default_timezone)
+        else:
+            self._default_timezone = default_timezone
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> Union[datetime, date]:
+        if isinstance(data, str):
+            try:
+                data = parser.parse(data)
+            except Exception as e:
+                raise ValidationException(
+                    f"Could not parse date from the value ({data!r})", e
+                )
+            # we treat the yyyy-MM-dd format as a special case
+            if hasattr(self, "format") and self.format == "yyyy-MM-dd":
+                data = data.date()
+
+        if isinstance(data, datetime):
+            if self._default_timezone and data.tzinfo is None:
+                data = data.replace(tzinfo=self._default_timezone)
+            return cast(datetime, data)
+        if isinstance(data, date):
+            return data
+        if isinstance(data, int):
+            # Divide by a float to preserve milliseconds on the datetime.
+            return datetime.utcfromtimestamp(data / 1000.0)
+
+        raise ValidationException(f"Could not parse date from the value ({data!r})")
+
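+# Illustrative sketch only: strings are parsed with dateutil, and when a
+# default_timezone was given, tz-naive results are assumed to get that zone
+# attached; the timestamp values below are hypothetical.
+#
+#   d = Date(default_timezone="UTC")
+#   dt = d._deserialize("2024-01-01T12:00:00")
+#   # dt.tzinfo is the object returned by tz.gettz("UTC"); integer inputs are
+#   # treated as epoch milliseconds:
+#   d._deserialize(1704110400000)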
+
+class DateNanos(Field):
+    """
+    :arg boost:
+    :arg format:
+    :arg ignore_malformed:
+    :arg index:
+    :arg script:
+    :arg on_script_error:
+    :arg null_value:
+    :arg precision_step:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "date_nanos"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        null_value: Any = DEFAULT,
+        precision_step: Union[int, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if format is not DEFAULT:
+            kwargs["format"] = format
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if precision_step is not DEFAULT:
+            kwargs["precision_step"] = precision_step
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class DateRange(RangeField):
+    """
+    :arg format:
+    :arg boost:
+    :arg coerce:
+    :arg index:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "date_range"
+    _core_field = Date()
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if format is not DEFAULT:
+            kwargs["format"] = format
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class DenseVector(Field):
+    """
+    :arg dims: Number of vector dimensions. Can't exceed `4096`. If `dims`
+        is not specified, it will be set to the length of the first vector
+        added to the field.
+    :arg element_type: The data type used to encode vectors. The supported
+        data types are `float` (default), `byte`, and `bit`. Defaults to
+        `float` if omitted.
+    :arg index: If `true`, you can search this field using the kNN search
+        API. Defaults to `True` if omitted.
+    :arg index_options: An optional section that configures the kNN
+        indexing algorithm. The HNSW algorithm has two internal parameters
+        that influence how the data structure is built. These can be
+        adjusted to improve the accuracy of results, at the expense of
+        slower indexing speed.  This parameter can only be specified when
+        `index` is `true`.
+    :arg similarity: The vector similarity metric to use in kNN search.
+        Documents are ranked by their vector field's similarity to the
+        query vector. The `_score` of each document will be derived from
+        the similarity, in a way that ensures scores are positive and that
+        a larger score corresponds to a higher ranking.  Defaults to
+        `l2_norm` when `element_type` is `bit` otherwise defaults to
+        `cosine`.  `bit` vectors only support `l2_norm` as their
+        similarity metric.  This parameter can only be specified when
+        `index` is `true`.
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "dense_vector"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        dims: Union[int, "DefaultType"] = DEFAULT,
+        element_type: Union[Literal["bit", "byte", "float"], "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            "types.DenseVectorIndexOptions", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        similarity: Union[
+            Literal["cosine", "dot_product", "l2_norm", "max_inner_product"],
+            "DefaultType",
+        ] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if dims is not DEFAULT:
+            kwargs["dims"] = dims
+        if element_type is not DEFAULT:
+            kwargs["element_type"] = element_type
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if similarity is not DEFAULT:
+            kwargs["similarity"] = similarity
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        self._element_type = kwargs.get("element_type", "float")
+        if self._element_type in ["float", "byte"]:
+            kwargs["multi"] = True
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> Any:
+        if self._element_type == "float":
+            return float(data)
+        elif self._element_type == "byte":
+            return int(data)
+        return data
+
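+# Illustrative sketch only: for "float" and "byte" element types the field is
+# marked multi, so each list element is assumed to pass through _deserialize
+# individually and gets coerced to the matching Python type.
+#
+#   v = DenseVector(element_type="byte")
+#   assert v._deserialize("7") == 7        # per-element int coercion
+#   DenseVector()._deserialize("0.5")      # default element_type "float" -> 0.5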
+
+class Double(Float):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "double"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class DoubleRange(RangeField):
+    """
+    :arg boost:
+    :arg coerce:
+    :arg index:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "double_range"
+    _core_field = Double()
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Flattened(Field):
+    """
+    :arg boost:
+    :arg depth_limit:
+    :arg doc_values:
+    :arg eager_global_ordinals:
+    :arg index:
+    :arg index_options:
+    :arg null_value:
+    :arg similarity:
+    :arg split_queries_on_whitespace:
+    :arg time_series_dimensions:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "flattened"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        depth_limit: Union[int, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            Literal["docs", "freqs", "positions", "offsets"], "DefaultType"
+        ] = DEFAULT,
+        null_value: Union[str, "DefaultType"] = DEFAULT,
+        similarity: Union[str, "DefaultType"] = DEFAULT,
+        split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT,
+        time_series_dimensions: Union[Sequence[str], "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if depth_limit is not DEFAULT:
+            kwargs["depth_limit"] = depth_limit
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if eager_global_ordinals is not DEFAULT:
+            kwargs["eager_global_ordinals"] = eager_global_ordinals
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if similarity is not DEFAULT:
+            kwargs["similarity"] = similarity
+        if split_queries_on_whitespace is not DEFAULT:
+            kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace
+        if time_series_dimensions is not DEFAULT:
+            kwargs["time_series_dimensions"] = time_series_dimensions
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class FloatRange(RangeField):
+    """
+    :arg boost:
+    :arg coerce:
+    :arg index:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "float_range"
+    _core_field = Float()
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class GeoPoint(Field):
+    """
+    :arg ignore_malformed:
+    :arg ignore_z_value:
+    :arg null_value:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "geo_point"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_z_value: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[
+            "types.LatLonGeoLocation",
+            "types.GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "position"], "DefaultType"
+        ] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if ignore_z_value is not DEFAULT:
+            kwargs["ignore_z_value"] = ignore_z_value
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
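+# Usage sketch (illustrative, not generated from the spec), assuming the
+# `Document` class from this package:
+#
+#     class Place(Document):
+#         location = GeoPoint()
+#
+#     # geo_point accepts several representations at index time, e.g.
+#     Place(location={"lat": 41.12, "lon": -71.34})
+#     Place(location=[-71.34, 41.12])   # [lon, lat] array
+#     Place(location="drm3btev3e86")    # geohash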
+
+class GeoShape(Field):
+    """
+    The `geo_shape` data type facilitates the indexing of and searching
+    with arbitrary geo shapes such as rectangles and polygons.
+
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg ignore_z_value:
+    :arg index:
+    :arg orientation:
+    :arg strategy:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "geo_shape"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_z_value: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT,
+        strategy: Union[Literal["recursive", "term"], "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if ignore_z_value is not DEFAULT:
+            kwargs["ignore_z_value"] = ignore_z_value
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if orientation is not DEFAULT:
+            kwargs["orientation"] = orientation
+        if strategy is not DEFAULT:
+            kwargs["strategy"] = strategy
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
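+# Usage sketch (illustrative, not generated from the spec): geo_shape
+# values are typically supplied as GeoJSON mappings, assuming `Document`
+# from this package:
+#
+#     class Region(Document):
+#         area = GeoShape()
+#
+#     Region(area={
+#         "type": "polygon",
+#         "coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
+#                          [100.0, 1.0], [100.0, 0.0]]],
+#     })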
+
+class HalfFloat(Float):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "half_float"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Histogram(Field):
+    """
+    :arg ignore_malformed:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "histogram"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class IcuCollationKeyword(Field):
+    """
+    :arg norms:
+    :arg index_options:
+    :arg index: Should the field be searchable?
+    :arg null_value: Accepts a string value which is substituted for any
+        explicit null values. Defaults to null, which means the field is
+        treated as missing.
+    :arg rules:
+    :arg language:
+    :arg country:
+    :arg variant:
+    :arg strength:
+    :arg decomposition:
+    :arg alternate:
+    :arg case_level:
+    :arg case_first:
+    :arg numeric:
+    :arg variable_top:
+    :arg hiragana_quaternary_mode:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "icu_collation_keyword"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        norms: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            Literal["docs", "freqs", "positions", "offsets"], "DefaultType"
+        ] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[str, "DefaultType"] = DEFAULT,
+        rules: Union[str, "DefaultType"] = DEFAULT,
+        language: Union[str, "DefaultType"] = DEFAULT,
+        country: Union[str, "DefaultType"] = DEFAULT,
+        variant: Union[str, "DefaultType"] = DEFAULT,
+        strength: Union[
+            Literal["primary", "secondary", "tertiary", "quaternary", "identical"],
+            "DefaultType",
+        ] = DEFAULT,
+        decomposition: Union[Literal["no", "identical"], "DefaultType"] = DEFAULT,
+        alternate: Union[Literal["shifted", "non-ignorable"], "DefaultType"] = DEFAULT,
+        case_level: Union[bool, "DefaultType"] = DEFAULT,
+        case_first: Union[Literal["lower", "upper"], "DefaultType"] = DEFAULT,
+        numeric: Union[bool, "DefaultType"] = DEFAULT,
+        variable_top: Union[str, "DefaultType"] = DEFAULT,
+        hiragana_quaternary_mode: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if norms is not DEFAULT:
+            kwargs["norms"] = norms
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if rules is not DEFAULT:
+            kwargs["rules"] = rules
+        if language is not DEFAULT:
+            kwargs["language"] = language
+        if country is not DEFAULT:
+            kwargs["country"] = country
+        if variant is not DEFAULT:
+            kwargs["variant"] = variant
+        if strength is not DEFAULT:
+            kwargs["strength"] = strength
+        if decomposition is not DEFAULT:
+            kwargs["decomposition"] = decomposition
+        if alternate is not DEFAULT:
+            kwargs["alternate"] = alternate
+        if case_level is not DEFAULT:
+            kwargs["case_level"] = case_level
+        if case_first is not DEFAULT:
+            kwargs["case_first"] = case_first
+        if numeric is not DEFAULT:
+            kwargs["numeric"] = numeric
+        if variable_top is not DEFAULT:
+            kwargs["variable_top"] = variable_top
+        if hiragana_quaternary_mode is not DEFAULT:
+            kwargs["hiragana_quaternary_mode"] = hiragana_quaternary_mode
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class IntegerRange(RangeField):
+    """
+    :arg boost:
+    :arg coerce:
+    :arg index:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "integer_range"
+    _core_field = Integer()
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
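+# Note (illustrative, not generated from the spec): like the other range
+# fields in this module, endpoint handling is delegated to `_core_field`;
+# range values are supplied as mappings of bounds and are wrapped in the
+# DSL's `Range` helper when read back. A minimal sketch, assuming
+# `Document` from this package:
+#
+#     class Audience(Document):
+#         age_range = IntegerRange()
+#
+#     Audience(age_range={"gte": 18, "lt": 65})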
+
+class Ip(Field):
+    """
+    :arg boost:
+    :arg index:
+    :arg ignore_malformed:
+    :arg null_value:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "ip"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[str, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]:
+        # the ipaddress library for pypy only accepts unicode.
+        return ipaddress.ip_address(unicode(data))
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]:
+        if data is None:
+            return None
+        return str(data)
+
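+# Note (illustrative, not generated from the spec): as the serializers
+# above show, values of this field are exposed as
+# `ipaddress.IPv4Address`/`IPv6Address` objects on the Python side and
+# converted back to strings when sent to Elasticsearch.
+#
+#     class Server(Document):
+#         addr = Ip()
+#
+#     s = Server(addr="192.168.0.1")
+#     # after a round trip through Elasticsearch, `s.addr` is an
+#     # ipaddress.IPv4Address and str(s.addr) == "192.168.0.1"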
+
+class IpRange(Field):
+    """
+    :arg boost:
+    :arg coerce:
+    :arg index:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "ip_range"
+    _core_field = Ip()
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Join(Field):
+    """
+    :arg relations:
+    :arg eager_global_ordinals:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "join"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        relations: Union[
+            Mapping[str, Union[str, Sequence[str]]], "DefaultType"
+        ] = DEFAULT,
+        eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if relations is not DEFAULT:
+            kwargs["relations"] = relations
+        if eager_global_ordinals is not DEFAULT:
+            kwargs["eager_global_ordinals"] = eager_global_ordinals
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
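+# Usage sketch (illustrative, not generated from the spec): `relations`
+# maps a parent relation name to its child name(s), assuming `Document`
+# from this package:
+#
+#     class Post(Document):
+#         join_field = Join(relations={"question": ["answer", "comment"]})
+#
+#     question = Post(_id="1", join_field={"name": "question"})
+#     answer = Post(join_field={"name": "answer", "parent": "1"})
+#     # children must be indexed with routing set to the parent id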
+
+class Keyword(Field):
+    """
+    :arg boost:
+    :arg eager_global_ordinals:
+    :arg index:
+    :arg index_options:
+    :arg script:
+    :arg on_script_error:
+    :arg normalizer:
+    :arg norms:
+    :arg null_value:
+    :arg similarity:
+    :arg split_queries_on_whitespace:
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "keyword"
+    _param_defs = {
+        "normalizer": {"type": "normalizer"},
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            Literal["docs", "freqs", "positions", "offsets"], "DefaultType"
+        ] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        normalizer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        norms: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[str, "DefaultType"] = DEFAULT,
+        similarity: Union[str, None, "DefaultType"] = DEFAULT,
+        split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if eager_global_ordinals is not DEFAULT:
+            kwargs["eager_global_ordinals"] = eager_global_ordinals
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if normalizer is not DEFAULT:
+            kwargs["normalizer"] = normalizer
+        if norms is not DEFAULT:
+            kwargs["norms"] = norms
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if similarity is not DEFAULT:
+            kwargs["similarity"] = similarity
+        if split_queries_on_whitespace is not DEFAULT:
+            kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
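+# Usage sketch (illustrative, not generated from the spec): `normalizer`
+# accepts either the name of a normalizer (the built-in `lowercase` one
+# is assumed here) or an analysis object, and multi-fields attach via
+# `fields`:
+#
+#     class Product(Document):
+#         tag = Keyword(normalizer="lowercase")
+#         sku = Keyword(fields={"text": Text()})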
+
+class Long(Integer):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "long"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[int, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class LongRange(RangeField):
+    """
+    :arg boost:
+    :arg coerce:
+    :arg index:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "long_range"
+    _core_field = Long()
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class MatchOnlyText(Field):
+    """
+    A variant of text that trades scoring and efficiency of positional
+    queries for space efficiency. This field effectively stores data the
+    same way as a text field that only indexes documents (index_options:
+    docs) and disables norms (norms: false). Term queries perform as
+    fast as, if not faster than, on text fields, but queries that need
+    positions, such as the match_phrase query, are slower because they
+    need to look at the _source document to verify whether a phrase
+    matches. All queries return constant scores that are equal to 1.0.
+
+    :arg fields:
+    :arg meta: Metadata about the field.
+    :arg copy_to: Allows you to copy the values of multiple fields into a
+        group field, which can then be queried as a single field.
+    """
+
+    name = "match_only_text"
+    _param_defs = {
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        super().__init__(*args, **kwargs)
+
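+# Usage sketch (illustrative, not generated from the spec): a typical use
+# is log messages, where space matters more than scoring, assuming
+# `Document` from this package:
+#
+#     class LogEntry(Document):
+#         message = MatchOnlyText()
+#
+#     # full-text queries work as usual, but scores are constant (1.0)
+#     s = LogEntry.search().query("match", message="connection reset")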
+
+class Murmur3(Field):
+    """
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "murmur3"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Nested(Object):
+    """
+    :arg enabled:
+    :arg include_in_parent:
+    :arg include_in_root:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "nested"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        enabled: Union[bool, "DefaultType"] = DEFAULT,
+        include_in_parent: Union[bool, "DefaultType"] = DEFAULT,
+        include_in_root: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if enabled is not DEFAULT:
+            kwargs["enabled"] = enabled
+        if include_in_parent is not DEFAULT:
+            kwargs["include_in_parent"] = include_in_parent
+        if include_in_root is not DEFAULT:
+            kwargs["include_in_root"] = include_in_root
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        kwargs.setdefault("multi", True)
+        super().__init__(*args, **kwargs)
+
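+# Usage sketch (illustrative, not generated from the spec): `Nested`
+# defaults to `multi=True`, so the attribute behaves like a list of inner
+# documents, assuming `Document` and `InnerDoc` from this package:
+#
+#     class Comment(InnerDoc):
+#         author = Keyword()
+#         content = Text()
+#
+#     class Post(Document):
+#         comments = Nested(Comment)
+#
+#     post = Post()
+#     post.comments.append(Comment(author="alice", content="nice post"))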
+
+class Passthrough(Field):
+    """
+    :arg enabled:
+    :arg priority:
+    :arg time_series_dimension:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "passthrough"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        enabled: Union[bool, "DefaultType"] = DEFAULT,
+        priority: Union[int, "DefaultType"] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if enabled is not DEFAULT:
+            kwargs["enabled"] = enabled
+        if priority is not DEFAULT:
+            kwargs["priority"] = priority
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Percolator(Field):
+    """
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "percolator"
+    _coerce = True
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> "Query":
+        return Q(data)  # type: ignore[no-any-return]
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]:
+        if data is None:
+            return None
+        return data.to_dict()  # type: ignore[no-any-return]
+
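+# Note (illustrative, not generated from the spec): as the serializers
+# above show, percolator values are DSL `Query` objects, rebuilt with `Q`
+# on read and stored via `to_dict()` on write.
+#
+#     class SavedSearch(Document):
+#         query = Percolator()
+#
+#     SavedSearch(query=Q("match", title="python"))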
+
+class Point(Field):
+    """
+    :arg ignore_malformed:
+    :arg ignore_z_value:
+    :arg null_value:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "point"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_z_value: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[str, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if ignore_z_value is not DEFAULT:
+            kwargs["ignore_z_value"] = ignore_z_value
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class RankFeature(Float):
+    """
+    :arg positive_score_impact:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "rank_feature"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        positive_score_impact: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if positive_score_impact is not DEFAULT:
+            kwargs["positive_score_impact"] = positive_score_impact
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
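+# Usage sketch (illustrative, not generated from the spec): rank_feature
+# fields hold per-document relevance signals that can be combined with
+# the `rank_feature` query; set `positive_score_impact=False` when lower
+# values should score higher.
+#
+#     class Page(Document):
+#         pagerank = RankFeature()
+#         url_length = RankFeature(positive_score_impact=False)
+#
+#     s = Page.search().query("rank_feature", field="pagerank")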
+
+class RankFeatures(Field):
+    """
+    :arg positive_score_impact:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "rank_features"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        positive_score_impact: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if positive_score_impact is not DEFAULT:
+            kwargs["positive_score_impact"] = positive_score_impact
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class RankVectors(Field):
+    """
+    Technical preview
+
+    :arg element_type:
+    :arg dims:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "rank_vectors"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        element_type: Union[Literal["byte", "float", "bit"], "DefaultType"] = DEFAULT,
+        dims: Union[int, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if element_type is not DEFAULT:
+            kwargs["element_type"] = element_type
+        if dims is not DEFAULT:
+            kwargs["dims"] = dims
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
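+# Editor's note: illustrative sketch only, not part of the generated module.
+# A `rank_vectors` field (technical preview) holds one or more vectors per
+# document; `element_type` and `dims` mirror the mapping parameters documented
+# above. The class and attribute names (`MyDoc`, `embeddings`) are hypothetical:
+#
+#     class MyDoc(Document):
+#         embeddings = RankVectors(element_type="float", dims=3)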
+
+class ScaledFloat(Float):
+    """
+    :arg null_value:
+    :arg scaling_factor:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric. Defaults to false.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "scaled_float"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        scaling_factor: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if scaling_factor is not DEFAULT:
+            kwargs["scaling_factor"] = scaling_factor
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        if "scaling_factor" not in kwargs:
+            if len(args) > 0:
+                kwargs["scaling_factor"] = args[0]
+                args = args[1:]
+            else:
+                raise TypeError("missing required argument: 'scaling_factor'")
+        super().__init__(*args, **kwargs)
+
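+# Editor's note: illustrative sketch only, not part of the generated module.
+# `ScaledFloat` requires `scaling_factor`, which may be passed positionally or
+# by keyword; omitting it raises TypeError("missing required argument:
+# 'scaling_factor'") as implemented above. `Product`, `price` and `tax` are
+# hypothetical names:
+#
+#     class Product(Document):
+#         price = ScaledFloat(100)                  # same as scaling_factor=100
+#         tax = ScaledFloat(scaling_factor=1000)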
+
+class SearchAsYouType(Field):
+    """
+    :arg analyzer:
+    :arg index:
+    :arg index_options:
+    :arg max_shingle_size:
+    :arg norms:
+    :arg search_analyzer:
+    :arg search_quote_analyzer:
+    :arg similarity:
+    :arg term_vector:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "search_as_you_type"
+    _param_defs = {
+        "analyzer": {"type": "analyzer"},
+        "search_analyzer": {"type": "analyzer"},
+        "search_quote_analyzer": {"type": "analyzer"},
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            Literal["docs", "freqs", "positions", "offsets"], "DefaultType"
+        ] = DEFAULT,
+        max_shingle_size: Union[int, "DefaultType"] = DEFAULT,
+        norms: Union[bool, "DefaultType"] = DEFAULT,
+        search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        search_quote_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        similarity: Union[str, None, "DefaultType"] = DEFAULT,
+        term_vector: Union[
+            Literal[
+                "no",
+                "yes",
+                "with_offsets",
+                "with_positions",
+                "with_positions_offsets",
+                "with_positions_offsets_payloads",
+                "with_positions_payloads",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if max_shingle_size is not DEFAULT:
+            kwargs["max_shingle_size"] = max_shingle_size
+        if norms is not DEFAULT:
+            kwargs["norms"] = norms
+        if search_analyzer is not DEFAULT:
+            kwargs["search_analyzer"] = search_analyzer
+        if search_quote_analyzer is not DEFAULT:
+            kwargs["search_quote_analyzer"] = search_quote_analyzer
+        if similarity is not DEFAULT:
+            kwargs["similarity"] = similarity
+        if term_vector is not DEFAULT:
+            kwargs["term_vector"] = term_vector
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class SemanticText(Field):
+    """
+    :arg meta:
+    :arg inference_id: Inference endpoint that will be used to generate
+        embeddings for the field. This parameter cannot be updated. Use
+        the Create inference API to create the endpoint. If
+        `search_inference_id` is specified, the inference endpoint will
+        only be used at index time. Defaults to `.elser-2-elasticsearch`
+        if omitted.
+    :arg search_inference_id: Inference endpoint that will be used to
+        generate embeddings at query time. You can update this parameter
+        by using the Update mapping API. Use the Create inference API to
+        create the endpoint. If not specified, the inference endpoint
+        defined by inference_id will be used at both index and query time.
+    :arg index_options: Settings for index_options that override any
+        defaults used by semantic_text, for example specific quantization
+        settings.
+    :arg chunking_settings: Settings for chunking text into smaller
+        passages. If specified, these will override the chunking settings
+        sent in the inference endpoint associated with inference_id. If
+        chunking settings are updated, they will not be applied to
+        existing documents until they are reindexed.
+    :arg fields:
+    """
+
+    name = "semantic_text"
+    _param_defs = {
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        inference_id: Union[str, "DefaultType"] = DEFAULT,
+        search_inference_id: Union[str, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            "types.SemanticTextIndexOptions", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        chunking_settings: Union[
+            "types.ChunkingSettings", None, Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if inference_id is not DEFAULT:
+            kwargs["inference_id"] = inference_id
+        if search_inference_id is not DEFAULT:
+            kwargs["search_inference_id"] = search_inference_id
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if chunking_settings is not DEFAULT:
+            kwargs["chunking_settings"] = chunking_settings
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        super().__init__(*args, **kwargs)
+
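+# Editor's note: illustrative sketch only, not part of the generated module.
+# A `semantic_text` field generates embeddings through an inference endpoint.
+# Per the docstring above, `inference_id` defaults to `.elser-2-elasticsearch`
+# when omitted, and `search_inference_id` may override it at query time.
+# `Article` and the endpoint name are hypothetical:
+#
+#     class Article(Document):
+#         summary = SemanticText()                            # default endpoint
+#         body = SemanticText(inference_id="my-e5-endpoint")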
+
+class Shape(Field):
+    """
+    The `shape` data type facilitates the indexing of and searching with
+    arbitrary `x, y` cartesian shapes such as rectangles and polygons.
+
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg ignore_z_value:
+    :arg orientation:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "shape"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_z_value: Union[bool, "DefaultType"] = DEFAULT,
+        orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if ignore_z_value is not DEFAULT:
+            kwargs["ignore_z_value"] = ignore_z_value
+        if orientation is not DEFAULT:
+            kwargs["orientation"] = orientation
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Short(Integer):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric. Defaults to false.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "short"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class SparseVector(Field):
+    """
+    :arg store:
+    :arg index_options: Additional index options for the sparse vector
+        field that controls the token pruning behavior of the sparse
+        vector field.
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "sparse_vector"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            "types.SparseVectorIndexOptions", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Text(Field):
+    """
+    :arg analyzer:
+    :arg boost:
+    :arg eager_global_ordinals:
+    :arg fielddata:
+    :arg fielddata_frequency_filter:
+    :arg index:
+    :arg index_options:
+    :arg index_phrases:
+    :arg index_prefixes:
+    :arg norms:
+    :arg position_increment_gap:
+    :arg search_analyzer:
+    :arg search_quote_analyzer:
+    :arg similarity:
+    :arg term_vector:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "text"
+    _param_defs = {
+        "analyzer": {"type": "analyzer"},
+        "search_analyzer": {"type": "analyzer"},
+        "search_quote_analyzer": {"type": "analyzer"},
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT,
+        fielddata: Union[bool, "DefaultType"] = DEFAULT,
+        fielddata_frequency_filter: Union[
+            "types.FielddataFrequencyFilter", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        index_options: Union[
+            Literal["docs", "freqs", "positions", "offsets"], "DefaultType"
+        ] = DEFAULT,
+        index_phrases: Union[bool, "DefaultType"] = DEFAULT,
+        index_prefixes: Union[
+            "types.TextIndexPrefixes", None, Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        norms: Union[bool, "DefaultType"] = DEFAULT,
+        position_increment_gap: Union[int, "DefaultType"] = DEFAULT,
+        search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        search_quote_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        similarity: Union[str, None, "DefaultType"] = DEFAULT,
+        term_vector: Union[
+            Literal[
+                "no",
+                "yes",
+                "with_offsets",
+                "with_positions",
+                "with_positions_offsets",
+                "with_positions_offsets_payloads",
+                "with_positions_payloads",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if eager_global_ordinals is not DEFAULT:
+            kwargs["eager_global_ordinals"] = eager_global_ordinals
+        if fielddata is not DEFAULT:
+            kwargs["fielddata"] = fielddata
+        if fielddata_frequency_filter is not DEFAULT:
+            kwargs["fielddata_frequency_filter"] = fielddata_frequency_filter
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if index_options is not DEFAULT:
+            kwargs["index_options"] = index_options
+        if index_phrases is not DEFAULT:
+            kwargs["index_phrases"] = index_phrases
+        if index_prefixes is not DEFAULT:
+            kwargs["index_prefixes"] = index_prefixes
+        if norms is not DEFAULT:
+            kwargs["norms"] = norms
+        if position_increment_gap is not DEFAULT:
+            kwargs["position_increment_gap"] = position_increment_gap
+        if search_analyzer is not DEFAULT:
+            kwargs["search_analyzer"] = search_analyzer
+        if search_quote_analyzer is not DEFAULT:
+            kwargs["search_quote_analyzer"] = search_quote_analyzer
+        if similarity is not DEFAULT:
+            kwargs["similarity"] = similarity
+        if term_vector is not DEFAULT:
+            kwargs["term_vector"] = term_vector
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
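+# Editor's note: illustrative sketch only, not part of the generated module.
+# `fields` declares multi-fields: the same source value indexed in additional
+# ways, such as a keyword sub-field for sorting and aggregations. `Keyword` is
+# defined earlier in this module; `BlogPost` and `title` are hypothetical:
+#
+#     class BlogPost(Document):
+#         title = Text(analyzer="standard", fields={"raw": Keyword()})
+#
+# The sub-field is then addressable as `title.raw` in queries and sorts.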
+
+class TokenCount(Field):
+    """
+    :arg analyzer:
+    :arg boost:
+    :arg index:
+    :arg null_value:
+    :arg enable_position_increments:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "token_count"
+    _param_defs = {
+        "analyzer": {"type": "analyzer"},
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        null_value: Union[float, "DefaultType"] = DEFAULT,
+        enable_position_increments: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if enable_position_increments is not DEFAULT:
+            kwargs["enable_position_increments"] = enable_position_increments
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class UnsignedLong(Field):
+    """
+    :arg null_value:
+    :arg boost:
+    :arg coerce:
+    :arg ignore_malformed:
+    :arg index:
+    :arg on_script_error:
+    :arg script:
+    :arg time_series_metric: For internal use by Elastic only. Marks the
+        field as a time series metric. Defaults to false.
+    :arg time_series_dimension: For internal use by Elastic only. Marks
+        the field as a time series dimension. Defaults to false.
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "unsigned_long"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[int, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        coerce: Union[bool, "DefaultType"] = DEFAULT,
+        ignore_malformed: Union[bool, "DefaultType"] = DEFAULT,
+        index: Union[bool, "DefaultType"] = DEFAULT,
+        on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        time_series_metric: Union[
+            Literal["gauge", "counter", "summary", "histogram", "position"],
+            "DefaultType",
+        ] = DEFAULT,
+        time_series_dimension: Union[bool, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if coerce is not DEFAULT:
+            kwargs["coerce"] = coerce
+        if ignore_malformed is not DEFAULT:
+            kwargs["ignore_malformed"] = ignore_malformed
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if on_script_error is not DEFAULT:
+            kwargs["on_script_error"] = on_script_error
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if time_series_metric is not DEFAULT:
+            kwargs["time_series_metric"] = time_series_metric
+        if time_series_dimension is not DEFAULT:
+            kwargs["time_series_dimension"] = time_series_dimension
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Version(Field):
+    """
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "version"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
+
+
+class Wildcard(Field):
+    """
+    :arg null_value:
+    :arg doc_values:
+    :arg copy_to:
+    :arg store:
+    :arg meta: Metadata about the field.
+    :arg properties:
+    :arg ignore_above:
+    :arg dynamic:
+    :arg fields:
+    :arg synthetic_source_keep:
+    """
+
+    name = "wildcard"
+    _param_defs = {
+        "properties": {"type": "field", "hash": True},
+        "fields": {"type": "field", "hash": True},
+    }
+
+    def __init__(
+        self,
+        *args: Any,
+        null_value: Union[str, "DefaultType"] = DEFAULT,
+        doc_values: Union[bool, "DefaultType"] = DEFAULT,
+        copy_to: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        store: Union[bool, "DefaultType"] = DEFAULT,
+        meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
+        properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        ignore_above: Union[int, "DefaultType"] = DEFAULT,
+        dynamic: Union[
+            Literal["strict", "runtime", "true", "false"], bool, "DefaultType"
+        ] = DEFAULT,
+        fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
+        synthetic_source_keep: Union[
+            Literal["none", "arrays", "all"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if null_value is not DEFAULT:
+            kwargs["null_value"] = null_value
+        if doc_values is not DEFAULT:
+            kwargs["doc_values"] = doc_values
+        if copy_to is not DEFAULT:
+            if isinstance(copy_to, list):
+                kwargs["copy_to"] = [str(field) for field in copy_to]
+            else:
+                kwargs["copy_to"] = str(copy_to)
+        if store is not DEFAULT:
+            kwargs["store"] = store
+        if meta is not DEFAULT:
+            kwargs["meta"] = meta
+        if properties is not DEFAULT:
+            kwargs["properties"] = properties
+        if ignore_above is not DEFAULT:
+            kwargs["ignore_above"] = ignore_above
+        if dynamic is not DEFAULT:
+            kwargs["dynamic"] = dynamic
+        if fields is not DEFAULT:
+            kwargs["fields"] = fields
+        if synthetic_source_keep is not DEFAULT:
+            kwargs["synthetic_source_keep"] = synthetic_source_keep
+        super().__init__(*args, **kwargs)
diff -pruN 8.17.2-2/elasticsearch/dsl/function.py 9.2.0-1/elasticsearch/dsl/function.py
--- 8.17.2-2/elasticsearch/dsl/function.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/function.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,180 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from copy import deepcopy
+from typing import (
+    Any,
+    ClassVar,
+    Dict,
+    Literal,
+    MutableMapping,
+    Optional,
+    Union,
+    overload,
+)
+
+from elastic_transport.client_utils import DEFAULT, DefaultType
+
+from .utils import AttrDict, DslBase
+
+
+@overload
+def SF(name_or_sf: MutableMapping[str, Any]) -> "ScoreFunction": ...
+
+
+@overload
+def SF(name_or_sf: "ScoreFunction") -> "ScoreFunction": ...
+
+
+@overload
+def SF(name_or_sf: str, **params: Any) -> "ScoreFunction": ...
+
+
+def SF(
+    name_or_sf: Union[str, "ScoreFunction", MutableMapping[str, Any]],
+    **params: Any,
+) -> "ScoreFunction":
+    # {"script_score": {"script": "_score"}, "filter": {}}
+    if isinstance(name_or_sf, collections.abc.MutableMapping):
+        if params:
+            raise ValueError("SF() cannot accept parameters when passing in a dict.")
+
+        kwargs: Dict[str, Any] = {}
+        sf = deepcopy(name_or_sf)
+        for k in ScoreFunction._param_defs:
+            if k in name_or_sf:
+                kwargs[k] = sf.pop(k)
+
+        # not sf, so just filter+weight, which used to be boost factor
+        sf_params = params
+        if not sf:
+            name = "boost_factor"
+        # {'FUNCTION': {...}}
+        elif len(sf) == 1:
+            name, sf_params = sf.popitem()
+        else:
+            raise ValueError(f"SF() got an unexpected fields in the dictionary: {sf!r}")
+
+        # boost factor special case, see elasticsearch #6343
+        if not isinstance(sf_params, collections.abc.Mapping):
+            sf_params = {"value": sf_params}
+
+        # mix known params (from _param_defs) and from inside the function
+        kwargs.update(sf_params)
+        return ScoreFunction.get_dsl_class(name)(**kwargs)
+
+    # ScriptScore(script="_score", filter=Q())
+    if isinstance(name_or_sf, ScoreFunction):
+        if params:
+            raise ValueError(
+                "SF() cannot accept parameters when passing in a ScoreFunction object."
+            )
+        return name_or_sf
+
+    # "script_score", script="_score", filter=Q()
+    return ScoreFunction.get_dsl_class(name_or_sf)(**params)
+
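+# Editor's note: illustrative sketch of the three SF() call forms handled
+# above, not part of the module source. The field and parameter names in the
+# snippets are hypothetical; Q is the query shortcut exported by
+# elasticsearch.dsl:
+#
+#     SF("script_score", script="_score * 2")           # by name + parameters
+#     SF({"weight": 2, "filter": Q("term", tag="es")})   # from a raw dict
+#     SF(FieldValueFactorScore(field="votes"))           # pass an instance through
+#
+# The dict form splits `filter`/`weight` out of the body (per _param_defs) and
+# falls back to the `boost_factor` function when nothing else remains.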
+
+class ScoreFunction(DslBase):
+    _type_name = "score_function"
+    _type_shortcut = staticmethod(SF)
+    _param_defs = {
+        "query": {"type": "query"},
+        "filter": {"type": "query"},
+        "weight": {},
+    }
+    name: ClassVar[Optional[str]] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        # filter and query dicts should be at the same level as us
+        for k in self._param_defs:
+            if self.name is not None:
+                val = d[self.name]
+                if isinstance(val, dict) and k in val:
+                    d[k] = val.pop(k)
+        return d
+
+
+class ScriptScore(ScoreFunction):
+    name = "script_score"
+
+
+class BoostFactor(ScoreFunction):
+    name = "boost_factor"
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        if self.name is not None:
+            val = d[self.name]
+            if isinstance(val, dict):
+                if "value" in val:
+                    d[self.name] = val.pop("value")
+                else:
+                    del d[self.name]
+        return d
+
+
+class RandomScore(ScoreFunction):
+    name = "random_score"
+
+
+class FieldValueFactorScore(ScoreFunction):
+    name = "field_value_factor"
+
+
+class FieldValueFactor(FieldValueFactorScore):  # alias of the above
+    pass
+
+
+class Linear(ScoreFunction):
+    name = "linear"
+
+
+class Gauss(ScoreFunction):
+    name = "gauss"
+
+
+class Exp(ScoreFunction):
+    name = "exp"
+
+
+class DecayFunction(AttrDict[Any]):
+    def __init__(
+        self,
+        *,
+        decay: Union[float, "DefaultType"] = DEFAULT,
+        offset: Any = DEFAULT,
+        scale: Any = DEFAULT,
+        origin: Any = DEFAULT,
+        multi_value_mode: Union[
+            Literal["min", "max", "avg", "sum"], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if decay != DEFAULT:
+            kwargs["decay"] = decay
+        if offset != DEFAULT:
+            kwargs["offset"] = offset
+        if scale != DEFAULT:
+            kwargs["scale"] = scale
+        if origin != DEFAULT:
+            kwargs["origin"] = origin
+        if multi_value_mode != DEFAULT:
+            kwargs["multi_value_mode"] = multi_value_mode
+        super().__init__(kwargs)
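+
+# Editor's note: illustrative sketch only, not part of the module source.
+# Decay functions are keyed by the field they apply to; the raw-dict form below
+# is the classic spelling, while `DecayFunction` is the typed helper defined
+# above. `publish_date` is a hypothetical field name:
+#
+#     SF("gauss", publish_date={"origin": "now", "scale": "10d"})
+#     Gauss(publish_date=DecayFunction(origin="now", scale="10d"))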
diff -pruN 8.17.2-2/elasticsearch/dsl/index.py 9.2.0-1/elasticsearch/dsl/index.py
--- 8.17.2-2/elasticsearch/dsl/index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,23 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ._async.index import (  # noqa: F401
+    AsyncComposableIndexTemplate,
+    AsyncIndex,
+    AsyncIndexTemplate,
+)
+from ._sync.index import ComposableIndexTemplate, Index, IndexTemplate  # noqa: F401
diff -pruN 8.17.2-2/elasticsearch/dsl/index_base.py 9.2.0-1/elasticsearch/dsl/index_base.py
--- 8.17.2-2/elasticsearch/dsl/index_base.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/index_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,178 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+from typing_extensions import Self
+
+from . import analysis
+from .utils import AnyUsingType, merge
+
+if TYPE_CHECKING:
+    from .document_base import DocumentMeta
+    from .field import Field
+    from .mapping_base import MappingBase
+
+
+class IndexBase:
+    def __init__(self, name: str, mapping_class: type, using: AnyUsingType = "default"):
+        """
+        :arg name: name of the index
+        :arg using: connection alias to use, defaults to ``'default'``
+        """
+        self._name = name
+        self._doc_types: List["DocumentMeta"] = []
+        self._using = using
+        self._settings: Dict[str, Any] = {}
+        self._aliases: Dict[str, Any] = {}
+        self._analysis: Dict[str, Any] = {}
+        self._mapping_class = mapping_class
+        self._mapping: Optional["MappingBase"] = None
+
+    def resolve_nested(
+        self, field_path: str
+    ) -> Tuple[List[str], Optional["MappingBase"]]:
+        for doc in self._doc_types:
+            nested, field = doc._doc_type.mapping.resolve_nested(field_path)
+            if field is not None:
+                return nested, field
+        if self._mapping:
+            return self._mapping.resolve_nested(field_path)
+        return [], None
+
+    def resolve_field(self, field_path: str) -> Optional["Field"]:
+        for doc in self._doc_types:
+            field = doc._doc_type.mapping.resolve_field(field_path)
+            if field is not None:
+                return field
+        if self._mapping:
+            return self._mapping.resolve_field(field_path)
+        return None
+
+    def get_or_create_mapping(self) -> "MappingBase":
+        if self._mapping is None:
+            self._mapping = self._mapping_class()
+        return self._mapping
+
+    def mapping(self, mapping: "MappingBase") -> None:
+        """
+        Associate a mapping (an instance of
+        :class:`~elasticsearch.dsl.Mapping`) with this index.
+        This means that, when this index is created, it will contain the
+        mappings for the document type defined by those mappings.
+        """
+        self.get_or_create_mapping().update(mapping)
+
+    def document(self, document: "DocumentMeta") -> "DocumentMeta":
+        """
+        Associate a :class:`~elasticsearch.dsl.Document` subclass with an index.
+        This means that, when this index is created, it will contain the
+        mappings for the ``Document``. If the ``Document`` class doesn't have a
+        default index yet (by defining ``class Index``), this instance will be
+        used. Can be used as a decorator::
+
+            i = Index('blog')
+
+            @i.document
+            class Post(Document):
+                title = Text()
+
+            # create the index, including Post mappings
+            i.create()
+
+            # .search() will now return a Search object that will return
+            # properly deserialized Post instances
+            s = i.search()
+        """
+        self._doc_types.append(document)
+
+        # If the document's index has no name, the user has not yet
+        # associated an index with the document, so use this index as
+        # the document's index.
+        if document._index._name is None:
+            document._index = self
+
+        return document
+
+    def settings(self, **kwargs: Any) -> Self:
+        """
+        Add settings to the index::
+
+            i = Index('i')
+            i.settings(number_of_shards=1, number_of_replicas=0)
+
+        Multiple calls to ``settings`` will merge the keys, with later
+        values overriding earlier ones.
+        """
+        self._settings.update(kwargs)
+        return self
+
+    def aliases(self, **kwargs: Any) -> Self:
+        """
+        Add aliases to the index definition::
+
+            i = Index('blog-v2')
+            i.aliases(blog={}, published={'filter': Q('term', published=True)})
+        """
+        self._aliases.update(kwargs)
+        return self
+
+    def analyzer(self, *args: Any, **kwargs: Any) -> None:
+        """
+        Explicitly add an analyzer to an index. Note that all custom analyzers
+        defined in mappings will also be created. This is useful for search analyzers.
+
+        Example::
+
+            from elasticsearch.dsl import analyzer, tokenizer
+
+            my_analyzer = analyzer('my_analyzer',
+                tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
+                filter=['lowercase']
+            )
+
+            i = Index('blog')
+            i.analyzer(my_analyzer)
+
+        """
+        analyzer = analysis.analyzer(*args, **kwargs)
+        d = analyzer.get_analysis_definition()
+        # empty custom analyzer, probably already defined out of our control
+        if not d:
+            return
+
+        # merge the definition
+        merge(self._analysis, d, True)
+
+    def to_dict(self) -> Dict[str, Any]:
+        out = {}
+        if self._settings:
+            out["settings"] = self._settings
+        if self._aliases:
+            out["aliases"] = self._aliases
+        mappings = self._mapping.to_dict() if self._mapping else {}
+        analysis = self._mapping._collect_analysis() if self._mapping else {}
+        for d in self._doc_types:
+            mapping = d._doc_type.mapping
+            merge(mappings, mapping.to_dict(), True)
+            merge(analysis, mapping._collect_analysis(), True)
+        if mappings:
+            out["mappings"] = mappings
+        if analysis or self._analysis:
+            merge(analysis, self._analysis)
+            out.setdefault("settings", {})["analysis"] = analysis
+        return out
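+
+# Editor's note: illustrative sketch only, not part of the module source.
+# Settings, aliases, document mappings and collected analysis are merged into
+# one body by to_dict(); `blog` and `Post` are hypothetical names:
+#
+#     i = Index("blog")
+#     i.settings(number_of_shards=1)
+#     i.aliases(published={})
+#
+#     @i.document
+#     class Post(Document):
+#         title = Text()
+#
+#     i.to_dict()
+#     # -> {"settings": {...}, "aliases": {...}, "mappings": {"properties": {...}}}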
diff -pruN 8.17.2-2/elasticsearch/dsl/mapping.py 9.2.0-1/elasticsearch/dsl/mapping.py
--- 8.17.2-2/elasticsearch/dsl/mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,19 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ._async.mapping import AsyncMapping  # noqa: F401
+from ._sync.mapping import Mapping  # noqa: F401
diff -pruN 8.17.2-2/elasticsearch/dsl/mapping_base.py 9.2.0-1/elasticsearch/dsl/mapping_base.py
--- 8.17.2-2/elasticsearch/dsl/mapping_base.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/mapping_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,219 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from itertools import chain
+from typing import Any, Dict, Iterator, List, Optional, Tuple, cast
+
+from typing_extensions import Self
+
+from .field import Field, Nested, Text, construct_field
+from .utils import DslBase
+
+META_FIELDS = frozenset(
+    (
+        "dynamic",
+        "transform",
+        "dynamic_date_formats",
+        "date_detection",
+        "numeric_detection",
+        "dynamic_templates",
+        "enabled",
+    )
+)
+
+
+class Properties(DslBase):
+    name = "properties"
+    _param_defs = {"properties": {"type": "field", "hash": True}}
+
+    properties: Dict[str, Field]
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def __repr__(self) -> str:
+        return "Properties()"
+
+    def __getitem__(self, name: str) -> Field:
+        return self.properties[name]
+
+    def __contains__(self, name: str) -> bool:
+        return name in self.properties
+
+    def to_dict(self) -> Dict[str, Any]:
+        return cast(Dict[str, Field], super().to_dict()["properties"])
+
+    def field(self, name: str, *args: Any, **kwargs: Any) -> Self:
+        self.properties[name] = construct_field(*args, **kwargs)
+        return self
+
+    def _collect_fields(self) -> Iterator[Field]:
+        """Iterate over all Field objects within, including multi fields."""
+        fields = cast(Dict[str, Field], self.properties.to_dict())  # type: ignore[attr-defined]
+        for f in fields.values():
+            yield f
+            # multi fields
+            if hasattr(f, "fields"):
+                yield from f.fields.to_dict().values()
+            # nested and inner objects
+            if hasattr(f, "_collect_fields"):
+                yield from f._collect_fields()
+
+    def update(self, other_object: Any) -> None:
+        if not hasattr(other_object, "properties"):
+            # not an inner/nested object, no merge possible
+            return
+
+        our, other = self.properties, other_object.properties
+        for name in other:
+            if name in our:
+                if hasattr(our[name], "update"):
+                    our[name].update(other[name])
+                continue
+            our[name] = other[name]
+
+
+class MappingBase:
+    def __init__(self) -> None:
+        self.properties = Properties()
+        self._meta: Dict[str, Any] = {}
+
+    def __repr__(self) -> str:
+        return "Mapping()"
+
+    def _clone(self) -> Self:
+        m = self.__class__()
+        m.properties._params = self.properties._params.copy()
+        return m
+
+    def resolve_nested(
+        self, field_path: str
+    ) -> Tuple[List[str], Optional["MappingBase"]]:
+        field = self
+        nested = []
+        parts = field_path.split(".")
+        for i, step in enumerate(parts):
+            try:
+                field = field[step]  # type: ignore[assignment]
+            except KeyError:
+                return [], None
+            if isinstance(field, Nested):
+                nested.append(".".join(parts[: i + 1]))
+        return nested, field
+
+    def resolve_field(self, field_path: str) -> Optional[Field]:
+        field = self
+        for step in field_path.split("."):
+            try:
+                field = field[step]  # type: ignore[assignment]
+            except KeyError:
+                return None
+        return cast(Field, field)
+
+    def _collect_analysis(self) -> Dict[str, Any]:
+        analysis: Dict[str, Any] = {}
+        fields = []
+        if "_all" in self._meta:
+            fields.append(Text(**self._meta["_all"]))
+
+        for f in chain(fields, self.properties._collect_fields()):
+            for analyzer_name in (
+                "analyzer",
+                "normalizer",
+                "search_analyzer",
+                "search_quote_analyzer",
+            ):
+                if not hasattr(f, analyzer_name):
+                    continue
+                analyzer = getattr(f, analyzer_name)
+                d = analyzer.get_analysis_definition()
+                # empty custom analyzer, probably already defined out of our control
+                if not d:
+                    continue
+
+                # merge the definition
+                # TODO: conflict detection/resolution
+                for key in d:
+                    analysis.setdefault(key, {}).update(d[key])
+
+        return analysis
+
+    def _update_from_dict(self, raw: Dict[str, Any]) -> None:
+        for name, definition in raw.get("properties", {}).items():
+            self.field(name, definition)
+
+        # metadata like _all etc
+        for name, value in raw.items():
+            if name != "properties":
+                if isinstance(value, collections.abc.Mapping):
+                    self.meta(name, **value)
+                else:
+                    self.meta(name, value)
+
+    def update(self, mapping: "MappingBase", update_only: bool = False) -> None:
+        for name in mapping:
+            if update_only and name in self:
+                # nested and inner objects, merge recursively
+                if hasattr(self[name], "update"):
+                    # FIXME only merge subfields, not the settings
+                    self[name].update(mapping[name], update_only)
+                continue
+            self.field(name, mapping[name])
+
+        if update_only:
+            for name in mapping._meta:
+                if name not in self._meta:
+                    self._meta[name] = mapping._meta[name]
+        else:
+            self._meta.update(mapping._meta)
+
+    def __contains__(self, name: str) -> bool:
+        return name in self.properties.properties
+
+    def __getitem__(self, name: str) -> Field:
+        return self.properties.properties[name]
+
+    def __iter__(self) -> Iterator[str]:
+        return iter(self.properties.properties)
+
+    def field(self, *args: Any, **kwargs: Any) -> Self:
+        self.properties.field(*args, **kwargs)
+        return self
+
+    def meta(self, name: str, params: Any = None, **kwargs: Any) -> Self:
+        if not name.startswith("_") and name not in META_FIELDS:
+            name = "_" + name
+
+        if params and kwargs:
+            raise ValueError("Meta configs cannot have both a value and a dictionary.")
+
+        self._meta[name] = kwargs if params is None else params
+        return self
+
+    def to_dict(self) -> Dict[str, Any]:
+        meta = self._meta
+
+        # hard coded serialization of analyzers in _all
+        if "_all" in meta:
+            meta = meta.copy()
+            _all = meta["_all"] = meta["_all"].copy()
+            for f in ("analyzer", "search_analyzer", "search_quote_analyzer"):
+                if hasattr(_all.get(f, None), "to_dict"):
+                    _all[f] = _all[f].to_dict()
+        meta.update(self.properties.to_dict())
+        return meta
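
As a quick orientation to the mapping API added above, the following is a minimal
usage sketch; it assumes the synchronous `Mapping` class exported from
`elasticsearch.dsl` (re-exported via the `_sync.mapping` import earlier in this
diff) inherits this `MappingBase` behaviour, and the printed shape is indicative
only:

    from elasticsearch.dsl import Keyword, Mapping, Text

    m = Mapping()
    m.field("title", Text(analyzer="snowball", fields={"raw": Keyword()}))
    m.field("category", "keyword")
    m.meta("dynamic", "strict")

    # Indicative output shape:
    # {"dynamic": "strict",
    #  "properties": {"title": {"type": "text", "analyzer": "snowball",
    #                           "fields": {"raw": {"type": "keyword"}}},
    #                 "category": {"type": "keyword"}}}
    print(m.to_dict())
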
diff -pruN 8.17.2-2/elasticsearch/dsl/pydantic.py 9.2.0-1/elasticsearch/dsl/pydantic.py
--- 8.17.2-2/elasticsearch/dsl/pydantic.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/pydantic.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,152 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type
+
+from pydantic import BaseModel, Field, PrivateAttr
+from typing_extensions import Annotated, Self, dataclass_transform
+
+from elasticsearch import dsl
+
+
+class ESMeta(BaseModel):
+    """Metadata items associated with Elasticsearch documents."""
+
+    id: str = ""
+    index: str = ""
+    primary_term: int = 0
+    seq_no: int = 0
+    version: int = 0
+    score: float = 0
+
+
+class _BaseModel(BaseModel):
+    meta: Annotated[ESMeta, dsl.mapped_field(exclude=True)] = Field(
+        default=ESMeta(),
+        init=False,
+    )
+
+
+class _BaseESModelMetaclass(type(BaseModel)):  # type: ignore[misc]
+    """Generic metaclass methods for BaseEsModel and AsyncBaseESModel."""
+
+    @staticmethod
+    def process_annotations(
+        metacls: Type["_BaseESModelMetaclass"], annotations: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Process Pydantic typing annotations and adapt them so that they can
+        be used to create the Elasticsearch document.
+        """
+        updated_annotations = {}
+        for var, ann in annotations.items():
+            if isinstance(ann, type(BaseModel)):
+                # an inner Pydantic model is transformed into an Object field
+                updated_annotations[var] = metacls.make_dsl_class(
+                    metacls, dsl.InnerDoc, ann
+                )
+            elif (
+                hasattr(ann, "__origin__")
+                and ann.__origin__ in [list, List]
+                and isinstance(ann.__args__[0], type(BaseModel))
+            ):
+                # an inner list of Pydantic models is transformed into a Nested field
+                updated_annotations[var] = List[  # type: ignore[assignment,misc]
+                    metacls.make_dsl_class(metacls, dsl.InnerDoc, ann.__args__[0])
+                ]
+            else:
+                updated_annotations[var] = ann
+        return updated_annotations
+
+    @staticmethod
+    def make_dsl_class(
+        metacls: Type["_BaseESModelMetaclass"],
+        dsl_class: type,
+        pydantic_model: type,
+        pydantic_attrs: Optional[Dict[str, Any]] = None,
+    ) -> type:
+        """Create a DSL document class dynamically, using the structure of a
+        Pydantic model."""
+        dsl_attrs = {
+            attr: value
+            for attr, value in dsl_class.__dict__.items()
+            if not attr.startswith("__")
+        }
+        pydantic_attrs = {
+            **(pydantic_attrs or {}),
+            "__annotations__": metacls.process_annotations(
+                metacls, pydantic_model.__annotations__
+            ),
+        }
+        return type(dsl_class)(
+            f"_ES{pydantic_model.__name__}",
+            (dsl_class,),
+            {
+                **pydantic_attrs,
+                **dsl_attrs,
+                "__qualname__": f"_ES{pydantic_model.__name__}",
+            },
+        )
+
+
+class BaseESModelMetaclass(_BaseESModelMetaclass):
+    """Metaclass for the BaseESModel class."""
+
+    def __new__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]) -> Any:
+        model = super().__new__(cls, name, bases, attrs)
+        model._doc = cls.make_dsl_class(cls, dsl.Document, model, attrs)
+        return model
+
+
+class AsyncBaseESModelMetaclass(_BaseESModelMetaclass):
+    """Metaclass for the AsyncBaseESModel class."""
+
+    def __new__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]) -> Any:
+        model = super().__new__(cls, name, bases, attrs)
+        model._doc = cls.make_dsl_class(cls, dsl.AsyncDocument, model, attrs)
+        return model
+
+
+@dataclass_transform(kw_only_default=True, field_specifiers=(Field, PrivateAttr))
+class BaseESModel(_BaseModel, metaclass=BaseESModelMetaclass):
+    _doc: ClassVar[Type[dsl.Document]]
+
+    def to_doc(self) -> dsl.Document:
+        """Convert this model to an Elasticsearch document."""
+        data = self.model_dump()
+        meta = {f"_{k}": v for k, v in data.pop("meta", {}).items() if v}
+        return self._doc(**meta, **data)
+
+    @classmethod
+    def from_doc(cls, dsl_obj: dsl.Document) -> Self:
+        """Create a model from the given Elasticsearch document."""
+        return cls(meta=ESMeta(**dsl_obj.meta.to_dict()), **dsl_obj.to_dict())
+
+
+@dataclass_transform(kw_only_default=True, field_specifiers=(Field, PrivateAttr))
+class AsyncBaseESModel(_BaseModel, metaclass=AsyncBaseESModelMetaclass):
+    _doc: ClassVar[Type[dsl.AsyncDocument]]
+
+    def to_doc(self) -> dsl.AsyncDocument:
+        """Convert this model to an Elasticsearch document."""
+        data = self.model_dump()
+        meta = {f"_{k}": v for k, v in data.pop("meta", {}).items() if v}
+        return self._doc(**meta, **data)
+
+    @classmethod
+    def from_doc(cls, dsl_obj: dsl.AsyncDocument) -> Self:
+        """Create a model from the given Elasticsearch document."""
+        return cls(meta=ESMeta(**dsl_obj.meta.to_dict()), **dsl_obj.to_dict())
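
To show how these Pydantic bridge classes are intended to be used, here is a
minimal, hypothetical round-trip sketch; the `Article` model and its field names
are invented for illustration, and it assumes `pydantic` is installed and that
plain `str`/`List[str]` annotations are mapped to Elasticsearch fields by the
typed-document machinery:

    from typing import List

    from elasticsearch.dsl.pydantic import BaseESModel


    class Article(BaseESModel):
        title: str
        tags: List[str]


    article = Article(title="Hello", tags=["demo"])

    doc = article.to_doc()        # an auto-generated "_ESArticle" dsl.Document
    doc.meta.id = "article-1"     # document metadata lives on doc.meta

    roundtrip = Article.from_doc(doc)
    assert roundtrip.title == article.title
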
diff -pruN 8.17.2-2/elasticsearch/dsl/query.py 9.2.0-1/elasticsearch/dsl/query.py
--- 8.17.2-2/elasticsearch/dsl/query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,2871 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from copy import deepcopy
+from itertools import chain
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    List,
+    Literal,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Protocol,
+    Sequence,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
+from elastic_transport.client_utils import DEFAULT
+
+# 'SF' looks unused but the test suite assumes it's available
+# from this module so others are liable to do so as well.
+from .function import SF  # noqa: F401
+from .function import ScoreFunction
+from .utils import DslBase
+
+if TYPE_CHECKING:
+    from elastic_transport.client_utils import DefaultType
+
+    from . import types, wrappers
+    from .document_base import InstrumentedField
+
+_T = TypeVar("_T")
+_M = TypeVar("_M", bound=Mapping[str, Any])
+
+
+class QProxiedProtocol(Protocol[_T]):
+    _proxied: _T
+
+
+@overload
+def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ...
+
+
+@overload
+def Q(name_or_query: "Query") -> "Query": ...
+
+
+@overload
+def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ...
+
+
+@overload
+def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ...
+
+
+def Q(
+    name_or_query: Union[
+        str,
+        "Query",
+        QProxiedProtocol[_T],
+        MutableMapping[str, _M],
+    ] = "match_all",
+    **params: Any,
+) -> Union["Query", _T]:
+    # {"match": {"title": "python"}}
+    if isinstance(name_or_query, collections.abc.MutableMapping):
+        if params:
+            raise ValueError("Q() cannot accept parameters when passing in a dict.")
+        if len(name_or_query) != 1:
+            raise ValueError(
+                'Q() can only accept dict with a single query ({"match": {...}}). '
+                "Instead it got (%r)" % name_or_query
+            )
+        name, q_params = deepcopy(name_or_query).popitem()
+        return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params)
+
+    # MatchAll()
+    if isinstance(name_or_query, Query):
+        if params:
+            raise ValueError(
+                "Q() cannot accept parameters when passing in a Query object."
+            )
+        return name_or_query
+
+    # s.query = Q('filtered', query=s.query)
+    if hasattr(name_or_query, "_proxied"):
+        return cast(QProxiedProtocol[_T], name_or_query)._proxied
+
+    # "match", title="python"
+    return Query.get_dsl_class(name_or_query)(**params)
+
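
A brief sketch of the three call styles the `Q` shortcut accepts (name plus
parameters, a single-key dict, or an existing `Query` instance):

    from elasticsearch.dsl.query import Q

    q1 = Q("match", title="python")          # name + parameters
    q2 = Q({"match": {"title": "python"}})   # single-key dict, no extra params allowed
    q3 = Q(q1)                               # an existing Query is returned unchanged

    assert q1.to_dict() == q2.to_dict() == {"match": {"title": "python"}}
    assert q3 is q1
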
+
+class Query(DslBase):
+    _type_name = "query"
+    _type_shortcut = staticmethod(Q)
+    name: ClassVar[Optional[str]] = None
+
+    # Add type annotations for methods not defined in every subclass
+    __ror__: ClassVar[Callable[["Query", "Query"], "Query"]]
+    __radd__: ClassVar[Callable[["Query", "Query"], "Query"]]
+    __rand__: ClassVar[Callable[["Query", "Query"], "Query"]]
+
+    def __add__(self, other: "Query") -> "Query":
+        # make sure we give queries that know how to combine themselves
+        # preference
+        if hasattr(other, "__radd__"):
+            return other.__radd__(self)
+        return Bool(must=[self, other])
+
+    def __invert__(self) -> "Query":
+        return Bool(must_not=[self])
+
+    def __or__(self, other: "Query") -> "Query":
+        # make sure we give queries that know how to combine themselves
+        # preference
+        if hasattr(other, "__ror__"):
+            return other.__ror__(self)
+        return Bool(should=[self, other])
+
+    def __and__(self, other: "Query") -> "Query":
+        # make sure we give queries that know how to combine themselves
+        # preference
+        if hasattr(other, "__rand__"):
+            return other.__rand__(self)
+        return Bool(must=[self, other])
+
+
+class Bool(Query):
+    """
+    Matches documents matching boolean combinations of other queries.
+
+    :arg filter: The clause (query) must appear in matching documents.
+        However, unlike `must`, the score of the query will be ignored.
+    :arg minimum_should_match: Specifies the number or percentage of
+        `should` clauses returned documents must match.
+    :arg must: The clause (query) must appear in matching documents and
+        will contribute to the score.
+    :arg must_not: The clause (query) must not appear in the matching
+        documents. Because scoring is ignored, a score of `0` is returned
+        for all documents.
+    :arg should: The clause (query) should appear in the matching
+        document.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "bool"
+    _param_defs = {
+        "filter": {"type": "query", "multi": True},
+        "must": {"type": "query", "multi": True},
+        "must_not": {"type": "query", "multi": True},
+        "should": {"type": "query", "multi": True},
+    }
+
+    def __init__(
+        self,
+        *,
+        filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT,
+        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
+        must: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT,
+        must_not: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT,
+        should: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            filter=filter,
+            minimum_should_match=minimum_should_match,
+            must=must,
+            must_not=must_not,
+            should=should,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+    def __add__(self, other: Query) -> "Bool":
+        q = self._clone()
+        if isinstance(other, Bool):
+            q.must += other.must
+            q.should += other.should
+            q.must_not += other.must_not
+            q.filter += other.filter
+        else:
+            q.must.append(other)
+        return q
+
+    __radd__ = __add__
+
+    def __or__(self, other: Query) -> Query:
+        for q in (self, other):
+            if isinstance(q, Bool) and not any(
+                (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None))
+            ):
+                other = self if q is other else other
+                q = q._clone()
+                if isinstance(other, Bool) and not any(
+                    (
+                        other.must,
+                        other.must_not,
+                        other.filter,
+                        getattr(other, "minimum_should_match", None),
+                    )
+                ):
+                    q.should.extend(other.should)
+                else:
+                    q.should.append(other)
+                return q
+
+        return Bool(should=[self, other])
+
+    __ror__ = __or__
+
+    @property
+    def _min_should_match(self) -> int:
+        return getattr(
+            self,
+            "minimum_should_match",
+            0 if not self.should or (self.must or self.filter) else 1,
+        )
+
+    def __invert__(self) -> Query:
+        # Because an empty Bool query is treated like
+        # MatchAll the inverse should be MatchNone
+        if not any(chain(self.must, self.filter, self.should, self.must_not)):
+            return MatchNone()
+
+        negations: List[Query] = []
+        for q in chain(self.must, self.filter):
+            negations.append(~q)
+
+        for q in self.must_not:
+            negations.append(q)
+
+        if self.should and self._min_should_match:
+            negations.append(Bool(must_not=self.should[:]))
+
+        if len(negations) == 1:
+            return negations[0]
+        return Bool(should=negations)
+
+    def __and__(self, other: Query) -> Query:
+        q = self._clone()
+        if isinstance(other, Bool):
+            q.must += other.must
+            q.must_not += other.must_not
+            q.filter += other.filter
+            q.should = []
+
+            # reset minimum_should_match as it will get calculated below
+            if "minimum_should_match" in q._params:
+                del q._params["minimum_should_match"]
+
+            for qx in (self, other):
+                min_should_match = qx._min_should_match
+                # TODO: percentages or negative numbers will fail here
+                # for now we report an error
+                if not isinstance(min_should_match, int) or min_should_match < 0:
+                    raise ValueError(
+                        "Can only combine queries with positive integer values for minimum_should_match"
+                    )
+                # all subqueries are required
+                if len(qx.should) <= min_should_match:
+                    q.must.extend(qx.should)
+                # not all of them are required, use it and remember min_should_match
+                elif not q.should:
+                    q.minimum_should_match = min_should_match
+                    q.should = qx.should
+                # all queries are optional, just extend should
+                elif q._min_should_match == 0 and min_should_match == 0:
+                    q.should.extend(qx.should)
+                # not all are required, add a should list to the must with proper min_should_match
+                else:
+                    q.must.append(
+                        Bool(should=qx.should, minimum_should_match=min_should_match)
+                    )
+        else:
+            if not (q.must or q.filter) and q.should:
+                q._params.setdefault("minimum_should_match", 1)
+            q.must.append(other)
+        return q
+
+    __rand__ = __and__
+
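
The operator overloads above are easiest to see in a small sketch: combining two
plain queries with `&` or `|` produces a `bool` query with `must` or `should`
clauses respectively:

    from elasticsearch.dsl.query import Q

    must_q = Q("match", title="python") & Q("match", title="django")
    assert must_q.to_dict() == {
        "bool": {"must": [{"match": {"title": "python"}},
                          {"match": {"title": "django"}}]}
    }

    should_q = Q("match", title="python") | Q("match", title="django")
    assert should_q.to_dict() == {
        "bool": {"should": [{"match": {"title": "python"}},
                            {"match": {"title": "django"}}]}
    }
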
+
+class Boosting(Query):
+    """
+    Returns documents matching a `positive` query while reducing the
+    relevance score of documents that also match a `negative` query.
+
+    :arg negative_boost: (required) Floating point number between 0 and
+        1.0 used to decrease the relevance scores of documents matching
+        the `negative` query.
+    :arg negative: (required) Query used to decrease the relevance score
+        of matching documents.
+    :arg positive: (required) Any returned documents must match this
+        query.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "boosting"
+    _param_defs = {
+        "negative": {"type": "query"},
+        "positive": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        negative_boost: Union[float, "DefaultType"] = DEFAULT,
+        negative: Union[Query, "DefaultType"] = DEFAULT,
+        positive: Union[Query, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            negative_boost=negative_boost,
+            negative=negative,
+            positive=positive,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class Common(Query):
+    """
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "common"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.CommonTermsQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class CombinedFields(Query):
+    """
+    The `combined_fields` query supports searching multiple text fields as
+    if their contents had been indexed into one combined field.
+
+    :arg fields: (required) List of fields to search. Field wildcard
+        patterns are allowed. Only `text` fields are supported, and they
+        must all have the same search `analyzer`.
+    :arg query: (required) Text to search for in the provided `fields`.
+        The `combined_fields` query analyzes the provided text before
+        performing a search.
+    :arg auto_generate_synonyms_phrase_query: If true, match phrase
+        queries are automatically created for multi-term synonyms.
+        Defaults to `True` if omitted.
+    :arg operator: Boolean logic used to interpret text in the query
+        value. Defaults to `or` if omitted.
+    :arg minimum_should_match: Minimum number of clauses that must match
+        for a document to be returned.
+    :arg zero_terms_query: Indicates whether no documents are returned if
+        the analyzer removes all tokens, such as when using a `stop`
+        filter. Defaults to `none` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "combined_fields"
+
+    def __init__(
+        self,
+        *,
+        fields: Union[
+            Sequence[Union[str, "InstrumentedField"]], "DefaultType"
+        ] = DEFAULT,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT,
+        operator: Union[Literal["or", "and"], "DefaultType"] = DEFAULT,
+        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
+        zero_terms_query: Union[Literal["none", "all"], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            fields=fields,
+            query=query,
+            auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query,
+            operator=operator,
+            minimum_should_match=minimum_should_match,
+            zero_terms_query=zero_terms_query,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class ConstantScore(Query):
+    """
+    Wraps a filter query and returns every matching document with a
+    relevance score equal to the `boost` parameter value.
+
+    :arg filter: (required) Filter query you wish to run. Any returned
+        documents must match this query. Filter queries do not calculate
+        relevance scores. To speed up performance, Elasticsearch
+        automatically caches frequently used filter queries.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "constant_score"
+    _param_defs = {
+        "filter": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        filter: Union[Query, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(filter=filter, boost=boost, _name=_name, **kwargs)
+
+
+class DisMax(Query):
+    """
+    Returns documents matching one or more wrapped queries, called query
+    clauses or clauses. If a returned document matches multiple query
+    clauses, the `dis_max` query assigns the document the highest
+    relevance score from any matching clause, plus a tie breaking
+    increment for any additional matching subqueries.
+
+    :arg queries: (required) One or more query clauses. Returned documents
+        must match one or more of these queries. If a document matches
+        multiple queries, Elasticsearch uses the highest relevance score.
+    :arg tie_breaker: Floating point number between 0 and 1.0 used to
+        increase the relevance scores of documents matching multiple query
+        clauses.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "dis_max"
+    _param_defs = {
+        "queries": {"type": "query", "multi": True},
+    }
+
+    def __init__(
+        self,
+        *,
+        queries: Union[Sequence[Query], "DefaultType"] = DEFAULT,
+        tie_breaker: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            queries=queries, tie_breaker=tie_breaker, boost=boost, _name=_name, **kwargs
+        )
+
+
+class DistanceFeature(Query):
+    """
+    Boosts the relevance score of documents closer to a provided origin
+    date or point. For example, you can use this query to give more weight
+    to documents closer to a certain date or location.
+
+    :arg origin: (required) Date or point of origin used to calculate
+        distances. If the `field` value is a `date` or `date_nanos` field,
+        the `origin` value must be a date. Date Math, such as `now-1h`, is
+        supported. If the field value is a `geo_point` field, the `origin`
+        value must be a geopoint.
+    :arg pivot: (required) Distance from the `origin` at which relevance
+        scores receive half of the `boost` value. If the `field` value is
+        a `date` or `date_nanos` field, the `pivot` value must be a time
+        unit, such as `1h` or `10d`. If the `field` value is a `geo_point`
+        field, the `pivot` value must be a distance unit, such as `1km` or
+        `12m`.
+    :arg field: (required) Name of the field used to calculate distances.
+        This field must meet the following criteria: be a `date`,
+        `date_nanos` or `geo_point` field; have an `index` mapping
+        parameter value of `true`, which is the default; have a
+        `doc_values` mapping parameter value of `true`, which is the
+        default.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "distance_feature"
+
+    def __init__(
+        self,
+        *,
+        origin: Any = DEFAULT,
+        pivot: Any = DEFAULT,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            origin=origin, pivot=pivot, field=field, boost=boost, _name=_name, **kwargs
+        )
+
+
+class Exists(Query):
+    """
+    Returns documents that contain an indexed value for a field.
+
+    :arg field: (required) Name of the field you wish to search.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "exists"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, boost=boost, _name=_name, **kwargs)
+
+
+class FunctionScore(Query):
+    """
+    The `function_score` query enables you to modify the score of documents that
+    are retrieved by a query.
+
+    :arg boost_mode: Defines how the newly computed score is combined with
+        the score of the query. Defaults to `multiply` if omitted.
+    :arg functions: One or more functions that compute a new score for
+        each document returned by the query.
+    :arg max_boost: Restricts the new score to not exceed the provided
+        limit.
+    :arg min_score: Excludes documents that do not meet the provided score
+        threshold.
+    :arg query: A query that determines the documents for which a new
+        score is computed.
+    :arg score_mode: Specifies how the computed scores are combined.
+        Defaults to `multiply` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "function_score"
+    _param_defs = {
+        "functions": {"type": "score_function", "multi": True},
+        "query": {"type": "query"},
+        "filter": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        boost_mode: Union[
+            Literal["multiply", "replace", "sum", "avg", "max", "min"], "DefaultType"
+        ] = DEFAULT,
+        functions: Union[Sequence[ScoreFunction], "DefaultType"] = DEFAULT,
+        max_boost: Union[float, "DefaultType"] = DEFAULT,
+        min_score: Union[float, "DefaultType"] = DEFAULT,
+        query: Union[Query, "DefaultType"] = DEFAULT,
+        score_mode: Union[
+            Literal["multiply", "sum", "avg", "first", "max", "min"], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if functions is DEFAULT:
+            functions = []
+            for name in ScoreFunction._classes:
+                if name in kwargs:
+                    functions.append({name: kwargs.pop(name)})  # type: ignore[arg-type]
+        super().__init__(
+            boost_mode=boost_mode,
+            functions=functions,
+            max_boost=max_boost,
+            min_score=min_score,
+            query=query,
+            score_mode=score_mode,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
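
A small sketch of building a `function_score` query with the `SF` shortcut that
this module imports from `.function` (the field names are illustrative):

    from elasticsearch.dsl.query import Q, SF

    q = Q(
        "function_score",
        query=Q("match", title="python"),
        functions=[
            SF("field_value_factor", field="likes", modifier="log1p"),
        ],
        score_mode="sum",
    )

When `functions` is not given, any keyword argument named after a registered
score function (for example `field_value_factor={"field": "likes"}`) is collected
into the list automatically, as implemented in `__init__` above.
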
+
+class Fuzzy(Query):
+    """
+    Returns documents that contain terms similar to the search term, as
+    measured by a Levenshtein edit distance.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "fuzzy"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.FuzzyQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class GeoBoundingBox(Query):
+    """
+    Matches geo_point and geo_shape values that intersect a bounding box.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg type:
+    :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points
+        with invalid latitude or longitude. Set to `COERCE` to also try to
+        infer correct latitude or longitude. Defaults to `'strict'` if
+        omitted.
+    :arg ignore_unmapped: Set to `true` to ignore an unmapped field and
+        not match any documents for this query. Set to `false` to throw an
+        exception if the field is not mapped.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "geo_bounding_box"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.CoordsGeoBounds",
+            "types.TopLeftBottomRightGeoBounds",
+            "types.TopRightBottomLeftGeoBounds",
+            "types.WktGeoBounds",
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        *,
+        type: Union[Literal["memory", "indexed"], "DefaultType"] = DEFAULT,
+        validation_method: Union[
+            Literal["coerce", "ignore_malformed", "strict"], "DefaultType"
+        ] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(
+            type=type,
+            validation_method=validation_method,
+            ignore_unmapped=ignore_unmapped,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class GeoDistance(Query):
+    """
+    Matches `geo_point` and `geo_shape` values within a given distance of
+    a geopoint.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg distance: (required) The radius of the circle centred on the
+        specified location. Points which fall into this circle are
+        considered to be matches.
+    :arg distance_type: How to compute the distance. Set to `plane` for a
+        faster calculation that's inaccurate on long distances and close
+        to the poles. Defaults to `'arc'` if omitted.
+    :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points
+        with invalid latitude or longitude. Set to `COERCE` to also try to
+        infer correct latitude or longitude. Defaults to `'strict'` if
+        omitted.
+    :arg ignore_unmapped: Set to `true` to ignore an unmapped field and
+        not match any documents for this query. Set to `false` to throw an
+        exception if the field is not mapped.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "geo_distance"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.LatLonGeoLocation",
+            "types.GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        *,
+        distance: Union[str, "DefaultType"] = DEFAULT,
+        distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT,
+        validation_method: Union[
+            Literal["coerce", "ignore_malformed", "strict"], "DefaultType"
+        ] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(
+            distance=distance,
+            distance_type=distance_type,
+            validation_method=validation_method,
+            ignore_unmapped=ignore_unmapped,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class GeoGrid(Query):
+    """
+    Matches `geo_point` and `geo_shape` values that intersect a grid cell
+    from a GeoGrid aggregation.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "geo_grid"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.GeoGridQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class GeoPolygon(Query):
+    """
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg validation_method:  Defaults to `'strict'` if omitted.
+    :arg ignore_unmapped:
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "geo_polygon"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.GeoPolygonPoints", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        *,
+        validation_method: Union[
+            Literal["coerce", "ignore_malformed", "strict"], "DefaultType"
+        ] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(
+            validation_method=validation_method,
+            ignore_unmapped=ignore_unmapped,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class GeoShape(Query):
+    """
+    Filter documents indexed using either the `geo_shape` or the
+    `geo_point` type.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg ignore_unmapped: Set to `true` to ignore an unmapped field and
+        not match any documents for this query. Set to `false` to throw an
+        exception if the field is not mapped.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "geo_shape"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.GeoShapeFieldQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        *,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(
+            ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs
+        )
+
+
+class HasChild(Query):
+    """
+    Returns parent documents whose joined child documents match a provided
+    query.
+
+    :arg query: (required) Query you wish to run on child documents of the
+        `type` field. If a child document matches the search, the query
+        returns the parent document.
+    :arg type: (required) Name of the child relationship mapped for the
+        `join` field.
+    :arg ignore_unmapped: Indicates whether to ignore an unmapped `type`
+        and not return any documents instead of an error.
+    :arg inner_hits: If defined, each search hit will contain inner hits.
+    :arg max_children: Maximum number of child documents that match the
+        query allowed for a returned parent document. If the parent
+        document exceeds this limit, it is excluded from the search
+        results.
+    :arg min_children: Minimum number of child documents that match the
+        query required to match the query for a returned parent document.
+        If the parent document does not meet this limit, it is excluded
+        from the search results.
+    :arg score_mode: Indicates how scores for matching child documents
+        affect the root parent document’s relevance score. Defaults to
+        `'none'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "has_child"
+    _param_defs = {
+        "query": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        query: Union[Query, "DefaultType"] = DEFAULT,
+        type: Union[str, "DefaultType"] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT,
+        max_children: Union[int, "DefaultType"] = DEFAULT,
+        min_children: Union[int, "DefaultType"] = DEFAULT,
+        score_mode: Union[
+            Literal["none", "avg", "sum", "max", "min"], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            query=query,
+            type=type,
+            ignore_unmapped=ignore_unmapped,
+            inner_hits=inner_hits,
+            max_children=max_children,
+            min_children=min_children,
+            score_mode=score_mode,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
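
For orientation, a minimal sketch of a parent/child query built with this class;
the `comment` relation name and the `body` field are invented for illustration:

    from elasticsearch.dsl.query import HasChild, Q

    q = HasChild(
        type="comment",                        # child relation name (illustrative)
        query=Q("match", body="elasticsearch"),
        score_mode="max",
        min_children=1,
    )
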
+
+class HasParent(Query):
+    """
+    Returns child documents whose joined parent document matches a
+    provided query.
+
+    :arg parent_type: (required) Name of the parent relationship mapped
+        for the `join` field.
+    :arg query: (required) Query you wish to run on parent documents of
+        the `parent_type` field. If a parent document matches the search,
+        the query returns its child documents.
+    :arg ignore_unmapped: Indicates whether to ignore an unmapped
+        `parent_type` and not return any documents instead of an error.
+        You can use this parameter to query multiple indices that may not
+        contain the `parent_type`.
+    :arg inner_hits: If defined, each search hit will contain inner hits.
+    :arg score: Indicates whether the relevance score of a matching parent
+        document is aggregated into its child documents.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "has_parent"
+    _param_defs = {
+        "query": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        parent_type: Union[str, "DefaultType"] = DEFAULT,
+        query: Union[Query, "DefaultType"] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT,
+        score: Union[bool, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            parent_type=parent_type,
+            query=query,
+            ignore_unmapped=ignore_unmapped,
+            inner_hits=inner_hits,
+            score=score,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class Ids(Query):
+    """
+    Returns documents based on their IDs. This query uses document IDs
+    stored in the `_id` field.
+
+    :arg values: An array of document IDs.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "ids"
+
+    def __init__(
+        self,
+        *,
+        values: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(values=values, boost=boost, _name=_name, **kwargs)
+
+
+class Intervals(Query):
+    """
+    Returns documents based on the order and proximity of matching terms.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "intervals"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.IntervalsQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class Knn(Query):
+    """
+    Finds the k nearest vectors to a query vector, as measured by a
+    similarity metric. The knn query finds nearest vectors through approximate
+    search on indexed dense_vectors.
+
+    :arg field: (required) The name of the vector field to search against
+    :arg query_vector: The query vector
+    :arg query_vector_builder: The query vector builder. You must provide
+        a query_vector_builder or query_vector, but not both.
+    :arg num_candidates: The number of nearest neighbor candidates to
+        consider per shard
+    :arg visit_percentage: The percentage of vectors to explore per shard
+        while doing knn search with bbq_disk
+    :arg k: The final number of nearest neighbors to return as top hits
+    :arg filter: Filters for the kNN search query
+    :arg similarity: The minimum similarity for a vector to be considered
+        a match
+    :arg rescore_vector: Apply oversampling and rescoring to quantized
+        vectors
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "knn"
+    _param_defs = {
+        "filter": {"type": "query", "multi": True},
+    }
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        query_vector: Union[Sequence[float], "DefaultType"] = DEFAULT,
+        query_vector_builder: Union[
+            "types.QueryVectorBuilder", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        num_candidates: Union[int, "DefaultType"] = DEFAULT,
+        visit_percentage: Union[float, "DefaultType"] = DEFAULT,
+        k: Union[int, "DefaultType"] = DEFAULT,
+        filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT,
+        similarity: Union[float, "DefaultType"] = DEFAULT,
+        rescore_vector: Union[
+            "types.RescoreVector", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            query_vector=query_vector,
+            query_vector_builder=query_vector_builder,
+            num_candidates=num_candidates,
+            visit_percentage=visit_percentage,
+            k=k,
+            filter=filter,
+            similarity=similarity,
+            rescore_vector=rescore_vector,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
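
A short sketch of an approximate kNN query against a hypothetical `dense_vector`
field named `embedding`:

    from elasticsearch.dsl.query import Knn, Q

    q = Knn(
        field="embedding",                     # assumed dense_vector field
        query_vector=[0.12, -0.53, 0.27],
        k=10,
        num_candidates=100,
        filter=Q("term", status="published"),
    )
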
+
+class Match(Query):
+    """
+    Returns documents that match a provided text, number, date or boolean
+    value. The provided text is analyzed before matching.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "match"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.MatchQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
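
Field-centric query classes like this one accept either a plain keyword argument
or the positional `_field`/`_value` pair; a small sketch:

    from elasticsearch.dsl.query import Match

    q1 = Match(title="python")
    q2 = Match("title", {"query": "python", "operator": "and"})

    assert q1.to_dict() == {"match": {"title": "python"}}
    assert q2.to_dict() == {"match": {"title": {"query": "python", "operator": "and"}}}
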
+
+class MatchAll(Query):
+    """
+    Matches all documents, giving them all a `_score` of 1.0.
+
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "match_all"
+
+    def __init__(
+        self,
+        *,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(boost=boost, _name=_name, **kwargs)
+
+    def __add__(self, other: "Query") -> "Query":
+        return other._clone()
+
+    __and__ = __rand__ = __radd__ = __add__
+
+    def __or__(self, other: "Query") -> "MatchAll":
+        return self
+
+    __ror__ = __or__
+
+    def __invert__(self) -> "MatchNone":
+        return MatchNone()
+
+
+EMPTY_QUERY = MatchAll()
+
+
+class MatchBoolPrefix(Query):
+    """
+    Analyzes its input and constructs a `bool` query from the terms. Each
+    term except the last is used in a `term` query. The last term is used
+    in a prefix query.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "match_bool_prefix"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.MatchBoolPrefixQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class MatchNone(Query):
+    """
+    Matches no documents.
+
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "match_none"
+
+    def __init__(
+        self,
+        *,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(boost=boost, _name=_name, **kwargs)
+
+    def __add__(self, other: "Query") -> "MatchNone":
+        return self
+
+    __and__ = __rand__ = __radd__ = __add__
+
+    def __or__(self, other: "Query") -> "Query":
+        return other._clone()
+
+    __ror__ = __or__
+
+    def __invert__(self) -> MatchAll:
+        return MatchAll()
+
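
The special-cased operators above make `MatchAll` the identity element for `&`
and `MatchNone` the identity element for `|`; a quick sketch:

    from elasticsearch.dsl.query import MatchAll, MatchNone, Q

    q = Q("match", title="python")

    assert (MatchAll() & q).to_dict() == q.to_dict()
    assert (MatchNone() | q).to_dict() == q.to_dict()
    assert (~MatchAll()).to_dict() == {"match_none": {}}
    assert (~MatchNone()).to_dict() == {"match_all": {}}
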
+
+class MatchPhrase(Query):
+    """
+    Analyzes the text and creates a phrase query out of the analyzed text.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "match_phrase"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.MatchPhraseQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class MatchPhrasePrefix(Query):
+    """
+    Returns documents that contain the words of a provided text, in the
+    same order as provided. The last term of the provided text is treated
+    as a prefix, matching any words that begin with that term.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "match_phrase_prefix"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.MatchPhrasePrefixQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class MoreLikeThis(Query):
+    """
+    Returns documents that are "like" a given set of documents.
+
+    :arg like: (required) Specifies free form text and/or a single or
+        multiple documents for which you want to find similar documents.
+    :arg analyzer: The analyzer that is used to analyze the free form
+        text. Defaults to the analyzer associated with the first field in
+        fields.
+    :arg boost_terms: Each term in the formed query can be further
+        boosted by its tf-idf score. This sets the boost factor to use
+        when using this feature. Defaults to deactivated (0).
+    :arg fail_on_unsupported_field: Controls whether the query should fail
+        (throw an exception) if any of the specified fields are not of the
+        supported types (`text` or `keyword`). Defaults to `True` if
+        omitted.
+    :arg fields: A list of fields to fetch and analyze the text from.
+        Defaults to the `index.query.default_field` index setting, which
+        has a default value of `*`.
+    :arg include: Specifies whether the input documents should also be
+        included in the search results returned.
+    :arg max_doc_freq: The maximum document frequency above which the
+        terms are ignored from the input document.
+    :arg max_query_terms: The maximum number of query terms that can be
+        selected. Defaults to `25` if omitted.
+    :arg max_word_length: The maximum word length above which the terms
+        are ignored. Defaults to unbounded (`0`).
+    :arg min_doc_freq: The minimum document frequency below which the
+        terms are ignored from the input document. Defaults to `5` if
+        omitted.
+    :arg minimum_should_match: After the disjunctive query has been
+        formed, this parameter controls the number of terms that must
+        match.
+    :arg min_term_freq: The minimum term frequency below which the terms
+        are ignored from the input document. Defaults to `2` if omitted.
+    :arg min_word_length: The minimum word length below which the terms
+        are ignored.
+    :arg routing:
+    :arg stop_words: An array of stop words. Any word in this set is
+        ignored.
+    :arg unlike: Used in combination with `like` to exclude documents that
+        match a set of terms.
+    :arg version:
+    :arg version_type: Defaults to `'internal'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "more_like_this"
+
+    def __init__(
+        self,
+        *,
+        like: Union[
+            Union[str, "types.LikeDocument"],
+            Sequence[Union[str, "types.LikeDocument"]],
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        analyzer: Union[str, "DefaultType"] = DEFAULT,
+        boost_terms: Union[float, "DefaultType"] = DEFAULT,
+        fail_on_unsupported_field: Union[bool, "DefaultType"] = DEFAULT,
+        fields: Union[
+            Sequence[Union[str, "InstrumentedField"]], "DefaultType"
+        ] = DEFAULT,
+        include: Union[bool, "DefaultType"] = DEFAULT,
+        max_doc_freq: Union[int, "DefaultType"] = DEFAULT,
+        max_query_terms: Union[int, "DefaultType"] = DEFAULT,
+        max_word_length: Union[int, "DefaultType"] = DEFAULT,
+        min_doc_freq: Union[int, "DefaultType"] = DEFAULT,
+        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
+        min_term_freq: Union[int, "DefaultType"] = DEFAULT,
+        min_word_length: Union[int, "DefaultType"] = DEFAULT,
+        routing: Union[str, "DefaultType"] = DEFAULT,
+        stop_words: Union[
+            Literal[
+                "_arabic_",
+                "_armenian_",
+                "_basque_",
+                "_bengali_",
+                "_brazilian_",
+                "_bulgarian_",
+                "_catalan_",
+                "_cjk_",
+                "_czech_",
+                "_danish_",
+                "_dutch_",
+                "_english_",
+                "_estonian_",
+                "_finnish_",
+                "_french_",
+                "_galician_",
+                "_german_",
+                "_greek_",
+                "_hindi_",
+                "_hungarian_",
+                "_indonesian_",
+                "_irish_",
+                "_italian_",
+                "_latvian_",
+                "_lithuanian_",
+                "_norwegian_",
+                "_persian_",
+                "_portuguese_",
+                "_romanian_",
+                "_russian_",
+                "_serbian_",
+                "_sorani_",
+                "_spanish_",
+                "_swedish_",
+                "_thai_",
+                "_turkish_",
+                "_none_",
+            ],
+            Sequence[str],
+            "DefaultType",
+        ] = DEFAULT,
+        unlike: Union[
+            Union[str, "types.LikeDocument"],
+            Sequence[Union[str, "types.LikeDocument"]],
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        version: Union[int, "DefaultType"] = DEFAULT,
+        version_type: Union[
+            Literal["internal", "external", "external_gte"], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            like=like,
+            analyzer=analyzer,
+            boost_terms=boost_terms,
+            fail_on_unsupported_field=fail_on_unsupported_field,
+            fields=fields,
+            include=include,
+            max_doc_freq=max_doc_freq,
+            max_query_terms=max_query_terms,
+            max_word_length=max_word_length,
+            min_doc_freq=min_doc_freq,
+            minimum_should_match=minimum_should_match,
+            min_term_freq=min_term_freq,
+            min_word_length=min_word_length,
+            routing=routing,
+            stop_words=stop_words,
+            unlike=unlike,
+            version=version,
+            version_type=version_type,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
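+# Usage sketch (illustrative text and field names):
+#
+#     MoreLikeThis(
+#         like="distributed search engines",
+#         fields=["title", "body"],
+#         min_term_freq=1,
+#         max_query_terms=12,
+#     )
+#
+# `like` also accepts document references such as {"_index": "articles", "_id": "1"}.
+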
+
+class MultiMatch(Query):
+    """
+    Enables you to search for a provided text, number, date or boolean
+    value across multiple fields. The provided text is analyzed before
+    matching.
+
+    :arg query: (required) Text, number, boolean value or date you wish to
+        find in the provided field.
+    :arg analyzer: Analyzer used to convert the text in the query value
+        into tokens.
+    :arg auto_generate_synonyms_phrase_query: If `true`, match phrase
+        queries are automatically created for multi-term synonyms.
+        Defaults to `True` if omitted.
+    :arg cutoff_frequency:
+    :arg fields: The fields to be queried. Defaults to the
+        `index.query.default_field` index setting, which in turn defaults
+        to `*`.
+    :arg fuzziness: Maximum edit distance allowed for matching.
+    :arg fuzzy_rewrite: Method used to rewrite the query.
+    :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+        transpositions of two adjacent characters (for example, `ab` to
+        `ba`). Can be applied to the term subqueries constructed for all
+        terms but the final term. Defaults to `True` if omitted.
+    :arg lenient: If `true`, format-based errors, such as providing a text
+        query value for a numeric field, are ignored.
+    :arg max_expansions: Maximum number of terms to which the query will
+        expand. Defaults to `50` if omitted.
+    :arg minimum_should_match: Minimum number of clauses that must match
+        for a document to be returned.
+    :arg operator: Boolean logic used to interpret text in the query
+        value. Defaults to `'or'` if omitted.
+    :arg prefix_length: Number of beginning characters left unchanged for
+        fuzzy matching.
+    :arg slop: Maximum number of positions allowed between matching
+        tokens.
+    :arg tie_breaker: Determines how scores for each per-term blended
+        query and scores across groups are combined.
+    :arg type: How the `multi_match` query is executed internally.
+        Defaults to `'best_fields'` if omitted.
+    :arg zero_terms_query: Indicates whether no documents are returned if
+        the `analyzer` removes all tokens, such as when using a `stop`
+        filter. Defaults to `'none'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "multi_match"
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        analyzer: Union[str, "DefaultType"] = DEFAULT,
+        auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT,
+        cutoff_frequency: Union[float, "DefaultType"] = DEFAULT,
+        fields: Union[
+            Union[str, "InstrumentedField"],
+            Sequence[Union[str, "InstrumentedField"]],
+            "DefaultType",
+        ] = DEFAULT,
+        fuzziness: Union[str, int, "DefaultType"] = DEFAULT,
+        fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT,
+        fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT,
+        lenient: Union[bool, "DefaultType"] = DEFAULT,
+        max_expansions: Union[int, "DefaultType"] = DEFAULT,
+        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
+        operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT,
+        prefix_length: Union[int, "DefaultType"] = DEFAULT,
+        slop: Union[int, "DefaultType"] = DEFAULT,
+        tie_breaker: Union[float, "DefaultType"] = DEFAULT,
+        type: Union[
+            Literal[
+                "best_fields",
+                "most_fields",
+                "cross_fields",
+                "phrase",
+                "phrase_prefix",
+                "bool_prefix",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        zero_terms_query: Union[Literal["all", "none"], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            query=query,
+            analyzer=analyzer,
+            auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query,
+            cutoff_frequency=cutoff_frequency,
+            fields=fields,
+            fuzziness=fuzziness,
+            fuzzy_rewrite=fuzzy_rewrite,
+            fuzzy_transpositions=fuzzy_transpositions,
+            lenient=lenient,
+            max_expansions=max_expansions,
+            minimum_should_match=minimum_should_match,
+            operator=operator,
+            prefix_length=prefix_length,
+            slop=slop,
+            tie_breaker=tie_breaker,
+            type=type,
+            zero_terms_query=zero_terms_query,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
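+# Usage sketch (illustrative; field names and the caret boost are assumptions):
+#
+#     MultiMatch(query="quick brown fox", fields=["title^2", "body"])
+#
+# which searches both fields, with matches on `title` weighted twice as heavily.
+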
+
+class Nested(Query):
+    """
+    Wraps another query to search nested fields. If an object matches the
+    search, the nested query returns the root parent document.
+
+    :arg path: (required) Path to the nested object you wish to search.
+    :arg query: (required) Query you wish to run on nested objects in the
+        path.
+    :arg ignore_unmapped: Indicates whether to ignore an unmapped path and
+        not return any documents instead of an error.
+    :arg inner_hits: If defined, each search hit will contain inner hits.
+    :arg score_mode: How scores for matching child objects affect the root
+        parent document’s relevance score. Defaults to `'avg'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "nested"
+    _param_defs = {
+        "query": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        query: Union[Query, "DefaultType"] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT,
+        score_mode: Union[
+            Literal["none", "avg", "sum", "max", "min"], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            path=path,
+            query=query,
+            ignore_unmapped=ignore_unmapped,
+            inner_hits=inner_hits,
+            score_mode=score_mode,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
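+# Usage sketch (illustrative path and field names): because nested fields are
+# dotted, the inner query's field is usually passed via dict unpacking:
+#
+#     Nested(
+#         path="comments",
+#         query=Match(**{"comments.author": "alice"}),
+#         score_mode="max",
+#     )
+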
+
+class ParentId(Query):
+    """
+    Returns child documents joined to a specific parent document.
+
+    :arg id: ID of the parent document.
+    :arg ignore_unmapped: Indicates whether to ignore an unmapped `type`
+        and not return any documents instead of an error.
+    :arg type: Name of the child relationship mapped for the `join` field.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "parent_id"
+
+    def __init__(
+        self,
+        *,
+        id: Union[str, "DefaultType"] = DEFAULT,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        type: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            id=id,
+            ignore_unmapped=ignore_unmapped,
+            type=type,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class Percolate(Query):
+    """
+    Matches queries stored in an index.
+
+    :arg field: (required) Field that holds the indexed queries. The field
+        must use the `percolator` mapping type.
+    :arg document: The source of the document being percolated.
+    :arg documents: An array of sources of the documents being percolated.
+    :arg id: The ID of a stored document to percolate.
+    :arg index: The index of a stored document to percolate.
+    :arg name: The suffix used for the `_percolator_document_slot` field
+        when multiple `percolate` queries are specified.
+    :arg preference: Preference used to fetch document to percolate.
+    :arg routing: Routing used to fetch document to percolate.
+    :arg version: The expected version of a stored document to percolate.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "percolate"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        document: Any = DEFAULT,
+        documents: Union[Sequence[Any], "DefaultType"] = DEFAULT,
+        id: Union[str, "DefaultType"] = DEFAULT,
+        index: Union[str, "DefaultType"] = DEFAULT,
+        name: Union[str, "DefaultType"] = DEFAULT,
+        preference: Union[str, "DefaultType"] = DEFAULT,
+        routing: Union[str, "DefaultType"] = DEFAULT,
+        version: Union[int, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            document=document,
+            documents=documents,
+            id=id,
+            index=index,
+            name=name,
+            preference=preference,
+            routing=routing,
+            version=version,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
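+# Usage sketch (assumes a percolator field named "query", which is only a
+# naming convention): percolate an in-memory document against stored queries:
+#
+#     Percolate(field="query", document={"message": "A new bonsai tree"})
+#
+# or reference an already indexed document via `index=...` and `id=...`.
+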
+
+class Pinned(Query):
+    """
+    Promotes selected documents to rank higher than those matching a given
+    query.
+
+    :arg organic: (required) Any choice of query used to rank documents
+        which will be ranked below the "pinned" documents.
+    :arg ids: Document IDs listed in the order they are to appear in
+        results. Required if `docs` is not specified.
+    :arg docs: Documents listed in the order they are to appear in
+        results. Required if `ids` is not specified.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "pinned"
+    _param_defs = {
+        "organic": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        organic: Union[Query, "DefaultType"] = DEFAULT,
+        ids: Union[Sequence[str], "DefaultType"] = DEFAULT,
+        docs: Union[
+            Sequence["types.PinnedDoc"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            organic=organic, ids=ids, docs=docs, boost=boost, _name=_name, **kwargs
+        )
+
+
+class Prefix(Query):
+    """
+    Returns documents that contain a specific prefix in a provided field.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "prefix"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.PrefixQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class QueryString(Query):
+    """
+    Returns documents based on a provided query string, using a parser
+    with a strict syntax.
+
+    :arg query: (required) Query string you wish to parse and use for
+        search.
+    :arg allow_leading_wildcard: If `true`, the wildcard characters `*`
+        and `?` are allowed as the first character of the query string.
+        Defaults to `True` if omitted.
+    :arg analyzer: Analyzer used to convert text in the query string into
+        tokens.
+    :arg analyze_wildcard: If `true`, the query attempts to analyze
+        wildcard terms in the query string.
+    :arg auto_generate_synonyms_phrase_query: If `true`, match phrase
+        queries are automatically created for multi-term synonyms.
+        Defaults to `True` if omitted.
+    :arg default_field: Default field to search if no field is provided in
+        the query string. Supports wildcards (`*`). Defaults to the
+        `index.query.default_field` index setting, which has a default
+        value of `*`.
+    :arg default_operator: Default boolean logic used to interpret text in
+        the query string if no operators are specified. Defaults to `'or'`
+        if omitted.
+    :arg enable_position_increments: If `true`, enable position increments
+        in queries constructed from a `query_string` search. Defaults to
+        `True` if omitted.
+    :arg escape:
+    :arg fields: Array of fields to search. Supports wildcards (`*`).
+    :arg fuzziness: Maximum edit distance allowed for fuzzy matching.
+    :arg fuzzy_max_expansions: Maximum number of terms to which the query
+        expands for fuzzy matching. Defaults to `50` if omitted.
+    :arg fuzzy_prefix_length: Number of beginning characters left
+        unchanged for fuzzy matching.
+    :arg fuzzy_rewrite: Method used to rewrite the query.
+    :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+        transpositions of two adjacent characters (for example, `ab` to
+        `ba`). Defaults to `True` if omitted.
+    :arg lenient: If `true`, format-based errors, such as providing a text
+        value for a numeric field, are ignored.
+    :arg max_determinized_states: Maximum number of automaton states
+        required for the query. Defaults to `10000` if omitted.
+    :arg minimum_should_match: Minimum number of clauses that must match
+        for a document to be returned.
+    :arg phrase_slop: Maximum number of positions allowed between matching
+        tokens for phrases.
+    :arg quote_analyzer: Analyzer used to convert quoted text in the query
+        string into tokens. For quoted text, this parameter overrides the
+        analyzer specified in the `analyzer` parameter.
+    :arg quote_field_suffix: Suffix appended to quoted text in the query
+        string. You can use this suffix to use a different analysis method
+        for exact matches.
+    :arg rewrite: Method used to rewrite the query.
+    :arg tie_breaker: How to combine the queries generated from the
+        individual search terms in the resulting `dis_max` query.
+    :arg time_zone: Coordinated Universal Time (UTC) offset or IANA time
+        zone used to convert date values in the query string to UTC.
+    :arg type: Determines how the query matches and scores documents.
+        Defaults to `'best_fields'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "query_string"
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        allow_leading_wildcard: Union[bool, "DefaultType"] = DEFAULT,
+        analyzer: Union[str, "DefaultType"] = DEFAULT,
+        analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT,
+        auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT,
+        default_field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT,
+        enable_position_increments: Union[bool, "DefaultType"] = DEFAULT,
+        escape: Union[bool, "DefaultType"] = DEFAULT,
+        fields: Union[
+            Sequence[Union[str, "InstrumentedField"]], "DefaultType"
+        ] = DEFAULT,
+        fuzziness: Union[str, int, "DefaultType"] = DEFAULT,
+        fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT,
+        fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT,
+        fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT,
+        fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT,
+        lenient: Union[bool, "DefaultType"] = DEFAULT,
+        max_determinized_states: Union[int, "DefaultType"] = DEFAULT,
+        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
+        phrase_slop: Union[float, "DefaultType"] = DEFAULT,
+        quote_analyzer: Union[str, "DefaultType"] = DEFAULT,
+        quote_field_suffix: Union[str, "DefaultType"] = DEFAULT,
+        rewrite: Union[str, "DefaultType"] = DEFAULT,
+        tie_breaker: Union[float, "DefaultType"] = DEFAULT,
+        time_zone: Union[str, "DefaultType"] = DEFAULT,
+        type: Union[
+            Literal[
+                "best_fields",
+                "most_fields",
+                "cross_fields",
+                "phrase",
+                "phrase_prefix",
+                "bool_prefix",
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            query=query,
+            allow_leading_wildcard=allow_leading_wildcard,
+            analyzer=analyzer,
+            analyze_wildcard=analyze_wildcard,
+            auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query,
+            default_field=default_field,
+            default_operator=default_operator,
+            enable_position_increments=enable_position_increments,
+            escape=escape,
+            fields=fields,
+            fuzziness=fuzziness,
+            fuzzy_max_expansions=fuzzy_max_expansions,
+            fuzzy_prefix_length=fuzzy_prefix_length,
+            fuzzy_rewrite=fuzzy_rewrite,
+            fuzzy_transpositions=fuzzy_transpositions,
+            lenient=lenient,
+            max_determinized_states=max_determinized_states,
+            minimum_should_match=minimum_should_match,
+            phrase_slop=phrase_slop,
+            quote_analyzer=quote_analyzer,
+            quote_field_suffix=quote_field_suffix,
+            rewrite=rewrite,
+            tie_breaker=tie_breaker,
+            time_zone=time_zone,
+            type=type,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
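+# Usage sketch (illustrative field name):
+#
+#     QueryString(query="(new york city) OR (big apple)", default_field="content")
+#
+# The query string itself uses the Lucene query syntax (AND/OR, wildcards,
+# field prefixes such as `content:apple`).
+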
+
+class Range(Query):
+    """
+    Returns documents that contain terms within a provided range.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "range"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["wrappers.Range[Any]", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
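+# Usage sketch (illustrative field name): the value is the usual range
+# operator mapping, passed either as a plain dict or a `wrappers.Range`:
+#
+#     Range(published={"gte": "2024-01-01", "lt": "2025-01-01"})
+#     # -> {"range": {"published": {"gte": "2024-01-01", "lt": "2025-01-01"}}}
+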
+
+class RankFeature(Query):
+    """
+    Boosts the relevance score of documents based on the numeric value of
+    a `rank_feature` or `rank_features` field.
+
+    :arg field: (required) `rank_feature` or `rank_features` field used to
+        boost relevance scores.
+    :arg saturation: Saturation function used to boost relevance scores
+        based on the value of the rank feature `field`.
+    :arg log: Logarithmic function used to boost relevance scores based on
+        the value of the rank feature `field`.
+    :arg linear: Linear function used to boost relevance scores based on
+        the value of the rank feature `field`.
+    :arg sigmoid: Sigmoid function used to boost relevance scores based on
+        the value of the rank feature `field`.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "rank_feature"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        saturation: Union[
+            "types.RankFeatureFunctionSaturation", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        log: Union[
+            "types.RankFeatureFunctionLogarithm", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        linear: Union[
+            "types.RankFeatureFunctionLinear", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        sigmoid: Union[
+            "types.RankFeatureFunctionSigmoid", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            saturation=saturation,
+            log=log,
+            linear=linear,
+            sigmoid=sigmoid,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class Regexp(Query):
+    """
+    Returns documents that contain terms matching a regular expression.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "regexp"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.RegexpQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class Rule(Query):
+    """
+    :arg organic: (required)
+    :arg match_criteria: (required)
+    :arg ruleset_ids:
+    :arg ruleset_id:
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "rule"
+    _param_defs = {
+        "organic": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        organic: Union[Query, "DefaultType"] = DEFAULT,
+        match_criteria: Any = DEFAULT,
+        ruleset_ids: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        ruleset_id: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            organic=organic,
+            match_criteria=match_criteria,
+            ruleset_ids=ruleset_ids,
+            ruleset_id=ruleset_id,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class Script(Query):
+    """
+    Filters documents based on a provided script. The script query is
+    typically used in a filter context.
+
+    :arg script: (required) Contains a script to run as a query. This
+        script must return a boolean value, `true` or `false`.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "script"
+
+    def __init__(
+        self,
+        *,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(script=script, boost=boost, _name=_name, **kwargs)
+
+
+class ScriptScore(Query):
+    """
+    Uses a script to provide a custom score for returned documents.
+
+    :arg query: (required) Query used to return documents.
+    :arg script: (required) Script used to compute the score of documents
+        returned by the query. Important: final relevance scores from the
+        `script_score` query cannot be negative.
+    :arg min_score: Documents with a score lower than this floating point
+        number are excluded from the search results.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "script_score"
+    _param_defs = {
+        "query": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        query: Union[Query, "DefaultType"] = DEFAULT,
+        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
+        min_score: Union[float, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            query=query,
+            script=script,
+            min_score=min_score,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
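+# Usage sketch (illustrative field names; the Painless expression assumes a
+# numeric "likes" field in the mapping):
+#
+#     ScriptScore(
+#         query=Match(message="elasticsearch"),
+#         script={"source": "doc['likes'].value / 10"},
+#     )
+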
+
+class Semantic(Query):
+    """
+    A semantic query against `semantic_text` field types.
+
+    :arg field: (required) The field to query, which must be a
+        semantic_text field type
+    :arg query: (required) The query text
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "semantic"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "DefaultType"] = DEFAULT,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs)
+
+
+class Shape(Query):
+    """
+    Queries documents that contain fields indexed using the `shape` type.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg ignore_unmapped: When set to `true` the query ignores an unmapped
+        field and will not match any documents.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "shape"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.ShapeFieldQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        *,
+        ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(
+            ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs
+        )
+
+
+class SimpleQueryString(Query):
+    """
+    Returns documents based on a provided query string, using a parser
+    with a limited but fault-tolerant syntax.
+
+    :arg query: (required) Query string in the simple query string syntax
+        you wish to parse and use for search.
+    :arg analyzer: Analyzer used to convert text in the query string into
+        tokens.
+    :arg analyze_wildcard: If `true`, the query attempts to analyze
+        wildcard terms in the query string.
+    :arg auto_generate_synonyms_phrase_query: If `true`, the parser
+        creates a match_phrase query for each multi-position token.
+        Defaults to `True` if omitted.
+    :arg default_operator: Default boolean logic used to interpret text in
+        the query string if no operators are specified. Defaults to `'or'`
+        if omitted.
+    :arg fields: Array of fields you wish to search. Accepts wildcard
+        expressions. You can also boost relevance scores for matches to
+        particular fields using a caret (`^`) notation. Defaults to the
+        `index.query.default_field` index setting, which has a default
+        value of `*`.
+    :arg flags: List of enabled operators for the simple query string
+        syntax. Defaults to `ALL` if omitted.
+    :arg fuzzy_max_expansions: Maximum number of terms to which the query
+        expands for fuzzy matching. Defaults to `50` if omitted.
+    :arg fuzzy_prefix_length: Number of beginning characters left
+        unchanged for fuzzy matching.
+    :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+        transpositions of two adjacent characters (for example, `ab` to
+        `ba`).
+    :arg lenient: If `true`, format-based errors, such as providing a text
+        value for a numeric field, are ignored.
+    :arg minimum_should_match: Minimum number of clauses that must match
+        for a document to be returned.
+    :arg quote_field_suffix: Suffix appended to quoted text in the query
+        string.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "simple_query_string"
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        analyzer: Union[str, "DefaultType"] = DEFAULT,
+        analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT,
+        auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT,
+        default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT,
+        fields: Union[
+            Sequence[Union[str, "InstrumentedField"]], "DefaultType"
+        ] = DEFAULT,
+        flags: Union[
+            "types.PipeSeparatedFlags", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT,
+        fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT,
+        fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT,
+        lenient: Union[bool, "DefaultType"] = DEFAULT,
+        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
+        quote_field_suffix: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            query=query,
+            analyzer=analyzer,
+            analyze_wildcard=analyze_wildcard,
+            auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query,
+            default_operator=default_operator,
+            fields=fields,
+            flags=flags,
+            fuzzy_max_expansions=fuzzy_max_expansions,
+            fuzzy_prefix_length=fuzzy_prefix_length,
+            fuzzy_transpositions=fuzzy_transpositions,
+            lenient=lenient,
+            minimum_should_match=minimum_should_match,
+            quote_field_suffix=quote_field_suffix,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class SpanContaining(Query):
+    """
+    Returns matches which enclose another span query.
+
+    :arg big: (required) Can be any span query. Matching spans from `big`
+        that contain matches from `little` are returned.
+    :arg little: (required) Can be any span query. Matching spans from
+        `big` that contain matches from `little` are returned.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_containing"
+
+    def __init__(
+        self,
+        *,
+        big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs)
+
+
+class SpanFieldMasking(Query):
+    """
+    Wrapper to allow span queries to participate in composite single-field
+    span queries by _lying_ about their search field.
+
+    :arg field: (required)
+    :arg query: (required)
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_field_masking"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        query: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs)
+
+
+class SpanFirst(Query):
+    """
+    Matches spans near the beginning of a field.
+
+    :arg end: (required) Controls the maximum end position permitted in a
+        match.
+    :arg match: (required) Can be any other span type query.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_first"
+
+    def __init__(
+        self,
+        *,
+        end: Union[int, "DefaultType"] = DEFAULT,
+        match: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(end=end, match=match, boost=boost, _name=_name, **kwargs)
+
+
+class SpanMulti(Query):
+    """
+    Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`,
+    `prefix`, `range`, or `regexp` query) as a `span` query, so it can be
+    nested.
+
+    :arg match: (required) Should be a multi term query (one of
+        `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query).
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_multi"
+    _param_defs = {
+        "match": {"type": "query"},
+    }
+
+    def __init__(
+        self,
+        *,
+        match: Union[Query, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(match=match, boost=boost, _name=_name, **kwargs)
+
+
+class SpanNear(Query):
+    """
+    Matches spans which are near one another. You can specify `slop`, the
+    maximum number of intervening unmatched positions, as well as whether
+    matches are required to be in-order.
+
+    :arg clauses: (required) Array of one or more other span type queries.
+    :arg in_order: Controls whether matches are required to be in-order.
+    :arg slop: Controls the maximum number of intervening unmatched
+        positions permitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_near"
+
+    def __init__(
+        self,
+        *,
+        clauses: Union[
+            Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        in_order: Union[bool, "DefaultType"] = DEFAULT,
+        slop: Union[int, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            clauses=clauses,
+            in_order=in_order,
+            slop=slop,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
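+# Usage sketch (illustrative field name): clauses can be given as raw dicts in
+# the native span syntax:
+#
+#     SpanNear(
+#         clauses=[
+#             {"span_term": {"title": "quick"}},
+#             {"span_term": {"title": "fox"}},
+#         ],
+#         slop=1,
+#         in_order=True,
+#     )
+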
+
+class SpanNot(Query):
+    """
+    Removes matches which overlap with another span query or which are
+    within x tokens before (controlled by the parameter `pre`) or y tokens
+    after (controlled by the parameter `post`) another span query.
+
+    :arg exclude: (required) Span query whose matches must not overlap
+        those returned.
+    :arg include: (required) Span query whose matches are filtered.
+    :arg dist: The number of tokens from within the include span that
+        can’t have overlap with the exclude span. Equivalent to setting
+        both `pre` and `post`.
+    :arg post: The number of tokens after the include span that can’t have
+        overlap with the exclude span.
+    :arg pre: The number of tokens before the include span that can’t have
+        overlap with the exclude span.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_not"
+
+    def __init__(
+        self,
+        *,
+        exclude: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        include: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        dist: Union[int, "DefaultType"] = DEFAULT,
+        post: Union[int, "DefaultType"] = DEFAULT,
+        pre: Union[int, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            exclude=exclude,
+            include=include,
+            dist=dist,
+            post=post,
+            pre=pre,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class SpanOr(Query):
+    """
+    Matches the union of its span clauses.
+
+    :arg clauses: (required) Array of one or more other span type queries.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_or"
+
+    def __init__(
+        self,
+        *,
+        clauses: Union[
+            Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(clauses=clauses, boost=boost, _name=_name, **kwargs)
+
+
+class SpanTerm(Query):
+    """
+    Matches spans containing a term.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "span_term"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.SpanTermQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class SpanWithin(Query):
+    """
+    Returns matches which are enclosed inside another span query.
+
+    :arg big: (required) Can be any span query. Matching spans from
+        `little` that are enclosed within `big` are returned.
+    :arg little: (required) Can be any span query. Matching spans from
+        `little` that are enclosed within `big` are returned.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "span_within"
+
+    def __init__(
+        self,
+        *,
+        big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs)
+
+
+class SparseVector(Query):
+    """
+    Queries against a sparse vector field, using either precomputed query
+    vectors or a natural language processing model to convert the query
+    text into a list of token-weight pairs.
+
+    :arg field: (required) The name of the field that contains the token-
+        weight pairs to be searched against. This field must be a mapped
+        sparse_vector field.
+    :arg query_vector: Dictionary of precomputed sparse vectors and their
+        associated weights. Only one of inference_id or query_vector may
+        be supplied in a request.
+    :arg inference_id: The inference ID to use to convert the query text
+        into token-weight pairs. It must be the same inference ID that was
+        used to create the tokens from the input text. Only one of
+        inference_id and query_vector is allowed. If inference_id is
+        specified, query must also be specified. Only one of inference_id
+        or query_vector may be supplied in a request.
+    :arg query: The query text you want to use for search. If inference_id
+        is specified, query must also be specified.
+    :arg prune: Whether to perform pruning, omitting the non-significant
+        tokens from the query to improve query performance. If prune is
+        true but the pruning_config is not specified, pruning will occur
+        but default values will be used. Default: false
+    :arg pruning_config: Optional pruning configuration. If enabled, this
+        will omit non-significant tokens from the query in order to
+        improve query performance. This is only used if prune is set to
+        true. If prune is set to true but pruning_config is not specified,
+        default values will be used.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "sparse_vector"
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        query_vector: Union[Mapping[str, float], "DefaultType"] = DEFAULT,
+        inference_id: Union[str, "DefaultType"] = DEFAULT,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        prune: Union[bool, "DefaultType"] = DEFAULT,
+        pruning_config: Union[
+            "types.TokenPruningConfig", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            field=field,
+            query_vector=query_vector,
+            inference_id=inference_id,
+            query=query,
+            prune=prune,
+            pruning_config=pruning_config,
+            boost=boost,
+            _name=_name,
+            **kwargs,
+        )
+
+
+class Term(Query):
+    """
+    Returns documents that contain an exact term in a provided field. To
+    return a document, the query term must exactly match the queried
+    field's value, including whitespace and capitalization.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "term"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.TermQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class Terms(Query):
+    """
+    Returns documents that contain one or more exact terms in a provided
+    field. To return a document, one or more terms must exactly match a
+    field value, including whitespace and capitalization.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "terms"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            Sequence[Union[int, float, str, bool, None]],
+            "types.TermsLookup",
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        *,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(boost=boost, _name=_name, **kwargs)
+
+    def _setattr(self, name: str, value: Any) -> None:
+        # convert any iterable that is not a string, list or dict into a list
+        if hasattr(value, "__iter__") and not isinstance(value, (str, list, dict)):
+            value = list(value)
+        super()._setattr(name, value)
+
+
+class TermsSet(Query):
+    """
+    Returns documents that contain a minimum number of exact terms in a
+    provided field. To return a document, a required number of terms must
+    exactly match the field values, including whitespace and
+    capitalization.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "terms_set"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.TermsSetQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class TextExpansion(Query):
+    """
+    Uses a natural language processing model to convert the query text
+    into a list of token-weight pairs which are then used in a query
+    against a sparse vector or rank features field.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "text_expansion"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.TextExpansionQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class WeightedTokens(Query):
+    """
+    Supports returning text_expansion query results by sending in
+    precomputed tokens with the query.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "weighted_tokens"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            "types.WeightedTokensQuery", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class Wildcard(Query):
+    """
+    Returns documents that contain terms matching a wildcard pattern.
+
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    """
+
+    name = "wildcard"
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["types.WildcardQuery", Dict[str, Any], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        super().__init__(**kwargs)
+
+
+class Wrapper(Query):
+    """
+    A query that accepts any other query as a base64 encoded string.
+
+    :arg query: (required) A base64 encoded query. The binary data format
+        can be any of JSON, YAML, CBOR or SMILE encodings
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "wrapper"
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(query=query, boost=boost, _name=_name, **kwargs)
+
+
+class Type(Query):
+    """
+    :arg value: (required)
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    name = "type"
+
+    def __init__(
+        self,
+        *,
+        value: Union[str, "DefaultType"] = DEFAULT,
+        boost: Union[float, "DefaultType"] = DEFAULT,
+        _name: Union[str, "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(value=value, boost=boost, _name=_name, **kwargs)
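The term-level classes above (``Term``, ``Terms``, ``TermsSet``, ``Wildcard``, ``Wrapper``) all turn a positional ``_field``/``_value`` pair into a keyword argument, so the field name becomes the key of the serialized query body. A minimal sketch of how they serialize, assuming hypothetical field names and values::

    from elasticsearch.dsl.query import Term, Terms, Wildcard, Wrapper
    import base64

    # str(_field) becomes the key under the query name
    Term("user.id", "kimchy").to_dict()
    # -> {'term': {'user.id': 'kimchy'}}

    Terms("tags", ["search", "python"]).to_dict()
    # -> {'terms': {'tags': ['search', 'python']}}

    Wildcard("title", {"value": "elastic*"}).to_dict()
    # -> {'wildcard': {'title': {'value': 'elastic*'}}}

    # Wrapper carries another query as a base64 encoded string
    encoded = base64.b64encode(b'{"term": {"user.id": "kimchy"}}').decode()
    Wrapper(query=encoded).to_dict()
    # -> {'wrapper': {'query': '...'}}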
diff -pruN 8.17.2-2/elasticsearch/dsl/response/__init__.py 9.2.0-1/elasticsearch/dsl/response/__init__.py
--- 8.17.2-2/elasticsearch/dsl/response/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/response/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,391 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Generic,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+    cast,
+)
+
+from ..utils import _R, AttrDict, AttrList, _wrap
+from .hit import Hit, HitMeta
+
+if TYPE_CHECKING:
+    from .. import types
+    from ..aggs import Agg
+    from ..faceted_search_base import FacetedSearchBase
+    from ..search_base import Request, SearchBase
+    from ..update_by_query_base import UpdateByQueryBase
+
+__all__ = [
+    "Response",
+    "AggResponse",
+    "UpdateByQueryResponse",
+    "Hit",
+    "HitMeta",
+    "AggregateResponseType",
+]
+
+
+class Response(AttrDict[Any], Generic[_R]):
+    """An Elasticsearch search response.
+
+    :arg took: (required) The number of milliseconds it took Elasticsearch
+        to run the request. This value is calculated by measuring the time
+        elapsed between receipt of a request on the coordinating node and
+        the time at which the coordinating node is ready to send the
+        response. It includes: communication time between the
+        coordinating node and data nodes, time the request spends in the
+        search thread pool queued for execution, and the actual run time.
+        It does not include: time needed to send the request to
+        Elasticsearch, time needed to serialize the JSON response, or
+        time needed to send the response to a client.
+    :arg timed_out: (required) If `true`, the request timed out before
+        completion; returned results may be partial or empty.
+    :arg _shards: (required) A count of shards used for the request.
+    :arg hits: search results
+    :arg aggregations: aggregation results
+    :arg _clusters:
+    :arg fields:
+    :arg max_score:
+    :arg num_reduce_phases:
+    :arg profile:
+    :arg pit_id:
+    :arg _scroll_id: The identifier for the search and its search context.
+        You can use this scroll ID with the scroll API to retrieve the
+        next batch of search results for the request. This property is
+        returned only if the `scroll` query parameter is specified in the
+        request.
+    :arg suggest:
+    :arg terminated_early:
+    """
+
+    _search: "SearchBase[_R]"
+    _faceted_search: "FacetedSearchBase[_R]"
+    _doc_class: Optional[_R]
+    _hits: List[_R]
+
+    took: int
+    timed_out: bool
+    _shards: "types.ShardStatistics"
+    _clusters: "types.ClusterStatistics"
+    fields: Mapping[str, Any]
+    max_score: float
+    num_reduce_phases: int
+    profile: "types.Profile"
+    pit_id: str
+    _scroll_id: str
+    suggest: Mapping[
+        str,
+        Sequence[
+            Union["types.CompletionSuggest", "types.PhraseSuggest", "types.TermSuggest"]
+        ],
+    ]
+    terminated_early: bool
+
+    def __init__(
+        self,
+        search: "Request[_R]",
+        response: Dict[str, Any],
+        doc_class: Optional[_R] = None,
+    ):
+        super(AttrDict, self).__setattr__("_search", search)
+        super(AttrDict, self).__setattr__("_doc_class", doc_class)
+        super().__init__(response)
+
+    def __iter__(self) -> Iterator[_R]:  # type: ignore[override]
+        return iter(self.hits)
+
+    def __getitem__(self, key: Union[slice, int, str]) -> Any:
+        if isinstance(key, (slice, int)):
+            # for slicing etc
+            return self.hits[key]
+        return super().__getitem__(key)
+
+    def __nonzero__(self) -> bool:
+        return bool(self.hits)
+
+    __bool__ = __nonzero__
+
+    def __repr__(self) -> str:
+        return "<Response: %r>" % (self.hits or self.aggregations)
+
+    def __len__(self) -> int:
+        return len(self.hits)
+
+    def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]:  # type: ignore[override]
+        return self._d_, self._search, self._doc_class
+
+    def __setstate__(
+        self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]  # type: ignore[override]
+    ) -> None:
+        super(AttrDict, self).__setattr__("_d_", state[0])
+        super(AttrDict, self).__setattr__("_search", state[1])
+        super(AttrDict, self).__setattr__("_doc_class", state[2])
+
+    def success(self) -> bool:
+        return self._shards.total == self._shards.successful and not self.timed_out
+
+    @property
+    def hits(self) -> List[_R]:
+        if not hasattr(self, "_hits"):
+            h = cast(AttrDict[Any], self._d_["hits"])
+
+            try:
+                hits = AttrList(list(map(self._search._get_result, h["hits"])))
+            except AttributeError as e:
+                # avoid raising AttributeError since it will be hidden by the property
+                raise TypeError("Could not parse hits.", e)
+
+            # avoid assigning _hits into self._d_
+            super(AttrDict, self).__setattr__("_hits", hits)
+            for k in h:
+                setattr(self._hits, k, _wrap(h[k]))
+        return self._hits
+
+    @property
+    def aggregations(self) -> "AggResponse[_R]":
+        return self.aggs
+
+    @property
+    def aggs(self) -> "AggResponse[_R]":
+        if not hasattr(self, "_aggs"):
+            aggs = AggResponse[_R](
+                cast("Agg[_R]", self._search.aggs),
+                self._search,
+                cast(Dict[str, Any], self._d_.get("aggregations", {})),
+            )
+
+            # avoid assigning _aggs into self._d_
+            super(AttrDict, self).__setattr__("_aggs", aggs)
+        return cast("AggResponse[_R]", self._aggs)
+
+    def search_after(self) -> "SearchBase[_R]":
+        """
+        Return a ``Search`` instance that retrieves the next page of results.
+
+        This method provides an easy way to paginate a long list of results using
+        the ``search_after`` option. For example::
+
+            page_size = 20
+            s = Search()[:page_size].sort("date")
+
+            while True:
+                # get a page of results
+                r = s.execute()
+
+                # do something with this page of results
+
+                # exit the loop if we reached the end
+                if len(r.hits) < page_size:
+                    break
+
+                # get a search object with the next page of results
+                s = r.search_after()
+
+        Note that the ``search_after`` option requires the search to have an
+        explicit ``sort`` order.
+        """
+        if len(self.hits) == 0:
+            raise ValueError("Cannot use search_after when there are no search results")
+        if not hasattr(self.hits[-1].meta, "sort"):  # type: ignore[attr-defined]
+            raise ValueError("Cannot use search_after when results are not sorted")
+        return self._search.extra(search_after=self.hits[-1].meta.sort)  # type: ignore[attr-defined]
+
+
+AggregateResponseType = Union[
+    "types.CardinalityAggregate",
+    "types.HdrPercentilesAggregate",
+    "types.HdrPercentileRanksAggregate",
+    "types.TDigestPercentilesAggregate",
+    "types.TDigestPercentileRanksAggregate",
+    "types.PercentilesBucketAggregate",
+    "types.MedianAbsoluteDeviationAggregate",
+    "types.MinAggregate",
+    "types.MaxAggregate",
+    "types.SumAggregate",
+    "types.AvgAggregate",
+    "types.WeightedAvgAggregate",
+    "types.ValueCountAggregate",
+    "types.SimpleValueAggregate",
+    "types.DerivativeAggregate",
+    "types.BucketMetricValueAggregate",
+    "types.ChangePointAggregate",
+    "types.StatsAggregate",
+    "types.StatsBucketAggregate",
+    "types.ExtendedStatsAggregate",
+    "types.ExtendedStatsBucketAggregate",
+    "types.CartesianBoundsAggregate",
+    "types.CartesianCentroidAggregate",
+    "types.GeoBoundsAggregate",
+    "types.GeoCentroidAggregate",
+    "types.HistogramAggregate",
+    "types.DateHistogramAggregate",
+    "types.AutoDateHistogramAggregate",
+    "types.VariableWidthHistogramAggregate",
+    "types.StringTermsAggregate",
+    "types.LongTermsAggregate",
+    "types.DoubleTermsAggregate",
+    "types.UnmappedTermsAggregate",
+    "types.LongRareTermsAggregate",
+    "types.StringRareTermsAggregate",
+    "types.UnmappedRareTermsAggregate",
+    "types.MultiTermsAggregate",
+    "types.MissingAggregate",
+    "types.NestedAggregate",
+    "types.ReverseNestedAggregate",
+    "types.GlobalAggregate",
+    "types.FilterAggregate",
+    "types.ChildrenAggregate",
+    "types.ParentAggregate",
+    "types.SamplerAggregate",
+    "types.UnmappedSamplerAggregate",
+    "types.GeoHashGridAggregate",
+    "types.GeoTileGridAggregate",
+    "types.GeoHexGridAggregate",
+    "types.RangeAggregate",
+    "types.DateRangeAggregate",
+    "types.GeoDistanceAggregate",
+    "types.IpRangeAggregate",
+    "types.IpPrefixAggregate",
+    "types.FiltersAggregate",
+    "types.AdjacencyMatrixAggregate",
+    "types.SignificantLongTermsAggregate",
+    "types.SignificantStringTermsAggregate",
+    "types.UnmappedSignificantTermsAggregate",
+    "types.CompositeAggregate",
+    "types.FrequentItemSetsAggregate",
+    "types.TimeSeriesAggregate",
+    "types.ScriptedMetricAggregate",
+    "types.TopHitsAggregate",
+    "types.InferenceAggregate",
+    "types.StringStatsAggregate",
+    "types.BoxPlotAggregate",
+    "types.TopMetricsAggregate",
+    "types.TTestAggregate",
+    "types.RateAggregate",
+    "types.CumulativeCardinalityAggregate",
+    "types.MatrixStatsAggregate",
+    "types.GeoLineAggregate",
+]
+
+
+class AggResponse(AttrDict[Any], Generic[_R]):
+    """An Elasticsearch aggregation response."""
+
+    _meta: Dict[str, Any]
+
+    def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]):
+        super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs})
+        super().__init__(data)
+
+    def __getitem__(self, attr_name: str) -> AggregateResponseType:
+        if attr_name in self._meta["aggs"]:
+            # don't do self._meta['aggs'][attr_name] to avoid copying
+            agg = self._meta["aggs"].aggs[attr_name]
+            return cast(
+                AggregateResponseType,
+                agg.result(self._meta["search"], self._d_[attr_name]),
+            )
+        return super().__getitem__(attr_name)  # type: ignore[no-any-return]
+
+    def __iter__(self) -> Iterator[AggregateResponseType]:  # type: ignore[override]
+        for name in self._meta["aggs"]:
+            yield self[name]
+
+
+class UpdateByQueryResponse(AttrDict[Any], Generic[_R]):
+    """An Elasticsearch update by query response.
+
+    :arg batches: The number of scroll responses pulled back by the update
+        by query.
+    :arg failures: Array of failures if there were any unrecoverable
+        errors during the process. If this is non-empty then the request
+        ended because of those failures. Update by query is implemented
+        using batches. Any failure causes the entire process to end, but
+        all failures in the current batch are collected into the array.
+        You can use the `conflicts` option to prevent the operation from
+        ending when version conflicts occur.
+    :arg noops: The number of documents that were ignored because the
+        script used for the update by query returned a noop value for
+        `ctx.op`.
+    :arg deleted: The number of documents that were successfully deleted.
+    :arg requests_per_second: The number of requests per second
+        effectively run during the update by query.
+    :arg retries: The number of retries attempted by update by query.
+        `bulk` is the number of bulk actions retried. `search` is the
+        number of search actions retried.
+    :arg task:
+    :arg timed_out: If true, some requests timed out during the update by
+        query.
+    :arg took: The number of milliseconds from start to end of the whole
+        operation.
+    :arg total: The number of documents that were successfully processed.
+    :arg updated: The number of documents that were successfully updated.
+    :arg version_conflicts: The number of version conflicts that the
+        update by query hit.
+    :arg throttled:
+    :arg throttled_millis: The number of milliseconds the request slept to
+        conform to `requests_per_second`.
+    :arg throttled_until:
+    :arg throttled_until_millis: This field should always be equal to zero
+        in an _update_by_query response. It only has meaning when using
+        the task API, where it indicates the next time (in milliseconds
+        since epoch) a throttled request will be run again in order to
+        conform to `requests_per_second`.
+    """
+
+    _search: "UpdateByQueryBase[_R]"
+
+    batches: int
+    failures: Sequence["types.BulkIndexByScrollFailure"]
+    noops: int
+    deleted: int
+    requests_per_second: float
+    retries: "types.Retries"
+    task: str
+    timed_out: bool
+    took: Any
+    total: int
+    updated: int
+    version_conflicts: int
+    throttled: Any
+    throttled_millis: Any
+    throttled_until: Any
+    throttled_until_millis: Any
+
+    def __init__(
+        self,
+        search: "Request[_R]",
+        response: Dict[str, Any],
+        doc_class: Optional[_R] = None,
+    ):
+        super(AttrDict, self).__setattr__("_search", search)
+        super(AttrDict, self).__setattr__("_doc_class", doc_class)
+        super().__init__(response)
+
+    def success(self) -> bool:
+        return not self.timed_out and not self.failures
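The ``Response`` wrapper above exposes hits, aggregations and pagination helpers on top of the raw body returned by Elasticsearch. A minimal usage sketch, assuming a configured connection and hypothetical index and field names::

    from elasticsearch.dsl import Search

    s = Search(index="blog")[:20].query("match", title="python").sort("published")
    response = s.execute()

    # success() checks that all shards responded and the request did not time out
    if response.success():
        print(response.took, "ms,", response.hits.total.value, "hits")

    # iterating the response delegates to response.hits
    for hit in response:
        print(hit.meta.score, hit.title)

    # fetch the next page; requires an explicit sort, as noted in search_after()
    if len(response.hits) == 20:
        s = response.search_after()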
diff -pruN 8.17.2-2/elasticsearch/dsl/response/aggs.py 9.2.0-1/elasticsearch/dsl/response/aggs.py
--- 8.17.2-2/elasticsearch/dsl/response/aggs.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/response/aggs.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,100 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast
+
+from ..utils import _R, AttrDict, AttrList
+from . import AggResponse, Response
+
+if TYPE_CHECKING:
+    from ..aggs import Agg
+    from ..field import Field
+    from ..search_base import SearchBase
+
+
+class Bucket(AggResponse[_R]):
+    def __init__(
+        self,
+        aggs: "Agg[_R]",
+        search: "SearchBase[_R]",
+        data: Dict[str, Any],
+        field: Optional["Field"] = None,
+    ):
+        super().__init__(aggs, search, data)
+
+
+class FieldBucket(Bucket[_R]):
+    def __init__(
+        self,
+        aggs: "Agg[_R]",
+        search: "SearchBase[_R]",
+        data: Dict[str, Any],
+        field: Optional["Field"] = None,
+    ):
+        if field:
+            data["key"] = field.deserialize(data["key"])
+        super().__init__(aggs, search, data, field)
+
+
+class BucketData(AggResponse[_R]):
+    _bucket_class = Bucket
+    _buckets: Union[AttrDict[Any], AttrList[Any]]
+
+    def _wrap_bucket(self, data: Dict[str, Any]) -> Bucket[_R]:
+        return self._bucket_class(
+            self._meta["aggs"],
+            self._meta["search"],
+            data,
+            field=self._meta.get("field"),
+        )
+
+    def __iter__(self) -> Iterator["Agg"]:  # type: ignore[override]
+        return iter(self.buckets)
+
+    def __len__(self) -> int:
+        return len(self.buckets)
+
+    def __getitem__(self, key: Any) -> Any:
+        if isinstance(key, (int, slice)):
+            return cast(AttrList[Any], self.buckets)[key]
+        return super().__getitem__(key)
+
+    @property
+    def buckets(self) -> Union[AttrDict[Any], AttrList[Any]]:
+        if not hasattr(self, "_buckets"):
+            field = getattr(self._meta["aggs"], "field", None)
+            if field:
+                self._meta["field"] = self._meta["search"]._resolve_field(field)
+            bs = cast(Union[Dict[str, Any], List[Any]], self._d_["buckets"])
+            if isinstance(bs, list):
+                ret = AttrList(bs, obj_wrapper=self._wrap_bucket)
+            else:
+                ret = AttrDict[Any]({k: self._wrap_bucket(bs[k]) for k in bs})  # type: ignore[assignment]
+            super(AttrDict, self).__setattr__("_buckets", ret)
+        return self._buckets
+
+
+class FieldBucketData(BucketData[_R]):
+    _bucket_class = FieldBucket
+
+
+class TopHitsData(Response[_R]):
+    def __init__(self, agg: "Agg[_R]", search: "SearchBase[_R]", data: Any):
+        super(AttrDict, self).__setattr__(
+            "meta", AttrDict({"agg": agg, "search": search})
+        )
+        super().__init__(search, data)
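``BucketData`` and ``FieldBucketData`` above make bucket aggregations iterable and indexable on the response. A short sketch, assuming a hypothetical ``tags`` keyword field::

    from elasticsearch.dsl import Search

    s = Search(index="blog")
    s.aggs.bucket("per_tag", "terms", field="tags")
    response = s.execute()

    # response.aggregations.per_tag wraps the buckets of the terms aggregation
    for bucket in response.aggregations.per_tag.buckets:
        print(bucket.key, bucket.doc_count)

    # integer indexing goes through BucketData.__getitem__
    top = response.aggregations.per_tag[0]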
diff -pruN 8.17.2-2/elasticsearch/dsl/response/hit.py 9.2.0-1/elasticsearch/dsl/response/hit.py
--- 8.17.2-2/elasticsearch/dsl/response/hit.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/response/hit.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,53 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, Dict, List, Tuple, cast
+
+from ..utils import AttrDict, HitMeta
+
+
+class Hit(AttrDict[Any]):
+    def __init__(self, document: Dict[str, Any]):
+        data: Dict[str, Any] = {}
+        if "_source" in document:
+            data = cast(Dict[str, Any], document["_source"])
+        if "fields" in document:
+            data.update(cast(Dict[str, Any], document["fields"]))
+
+        super().__init__(data)
+        # assign meta as attribute and not as key in self._d_
+        super(AttrDict, self).__setattr__("meta", HitMeta(document))
+
+    def __getstate__(self) -> Tuple[Dict[str, Any], HitMeta]:  # type: ignore[override]
+        # add self.meta since it is not in self.__dict__
+        return super().__getstate__() + (self.meta,)
+
+    def __setstate__(self, state: Tuple[Dict[str, Any], HitMeta]) -> None:  # type: ignore[override]
+        super(AttrDict, self).__setattr__("meta", state[-1])
+        super().__setstate__(state[:-1])
+
+    def __dir__(self) -> List[str]:
+        # be sure to expose meta in dir(self)
+        return super().__dir__() + ["meta"]
+
+    def __repr__(self) -> str:
+        return "<Hit({}): {}>".format(
+            "/".join(
+                getattr(self.meta, key) for key in ("index", "id") if key in self.meta
+            ),
+            super().__repr__(),
+        )
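A ``Hit`` keeps the document's ``_source``/``fields`` values as attributes and moves the Elasticsearch metadata out of the field namespace onto ``hit.meta``. Illustratively, with hypothetical field names::

    for hit in response:
        # document fields are plain attributes on the hit
        print(hit.title)
        # index, id, score, sort, etc. live on hit.meta (a HitMeta)
        print(hit.meta.index, hit.meta.id, hit.meta.score)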
diff -pruN 8.17.2-2/elasticsearch/dsl/search.py 9.2.0-1/elasticsearch/dsl/search.py
--- 8.17.2-2/elasticsearch/dsl/search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,20 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ._async.search import AsyncEmptySearch, AsyncMultiSearch, AsyncSearch  # noqa: F401
+from ._sync.search import EmptySearch, MultiSearch, Search  # noqa: F401
+from .search_base import Q  # noqa: F401
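The module above only re-exports the public search API, so both the synchronous and asynchronous entry points are importable from one place. For example::

    from elasticsearch.dsl.search import AsyncSearch, MultiSearch, Q, Search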
diff -pruN 8.17.2-2/elasticsearch/dsl/search_base.py 9.2.0-1/elasticsearch/dsl/search_base.py
--- 8.17.2-2/elasticsearch/dsl/search_base.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/search_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1057 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+import copy
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    Iterator,
+    List,
+    Optional,
+    Protocol,
+    Tuple,
+    Type,
+    Union,
+    cast,
+    overload,
+)
+
+from typing_extensions import Self, TypeVar
+
+from .aggs import A, Agg, AggBase
+from .document_base import InstrumentedField
+from .exceptions import IllegalOperation
+from .query import Bool, Q, Query
+from .response import Hit, Response
+from .utils import _R, AnyUsingType, AttrDict, DslBase, recursive_to_dict
+
+if TYPE_CHECKING:
+    from .field import Field, Object
+
+
+class SupportsClone(Protocol):
+    def _clone(self) -> Self: ...
+
+
+_S = TypeVar("_S", bound=SupportsClone)
+
+
+class QueryProxy(Generic[_S]):
+    """
+    Simple proxy around DSL objects (queries) that can be called
+    (to add a query or post_filter) and that proxies attribute access
+    to the wrapped query.
+    """
+
+    def __init__(self, search: _S, attr_name: str):
+        self._search = search
+        self._proxied: Optional[Query] = None
+        self._attr_name = attr_name
+
+    def __nonzero__(self) -> bool:
+        return self._proxied is not None
+
+    __bool__ = __nonzero__
+
+    def __call__(self, *args: Any, **kwargs: Any) -> _S:
+        """
+        Add a query.
+        """
+        s = self._search._clone()
+
+        # we cannot use self._proxied since we just cloned self._search and
+        # need to access the new self on the clone
+        proxied = getattr(s, self._attr_name)
+        if proxied._proxied is None:
+            proxied._proxied = Q(*args, **kwargs)
+        else:
+            proxied._proxied &= Q(*args, **kwargs)
+
+        # always return search to be chainable
+        return s
+
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._proxied, attr_name)
+
+    def __setattr__(self, attr_name: str, value: Any) -> None:
+        if not attr_name.startswith("_"):
+            if self._proxied is not None:
+                self._proxied = Q(self._proxied.to_dict())
+                setattr(self._proxied, attr_name, value)
+        super().__setattr__(attr_name, value)
+
+    def __getstate__(self) -> Tuple[_S, Optional[Query], str]:
+        return self._search, self._proxied, self._attr_name
+
+    def __setstate__(self, state: Tuple[_S, Optional[Query], str]) -> None:
+        self._search, self._proxied, self._attr_name = state
+
+
+class ProxyDescriptor(Generic[_S]):
+    """
+    Simple descriptor to enable setting of queries and filters as:
+
+        s = Search()
+        s.query = Q(...)
+
+    """
+
+    def __init__(self, name: str):
+        self._attr_name = f"_{name}_proxy"
+
+    def __get__(self, instance: Any, owner: object) -> QueryProxy[_S]:
+        return cast(QueryProxy[_S], getattr(instance, self._attr_name))
+
+    def __set__(self, instance: _S, value: Dict[str, Any]) -> None:
+        proxy: QueryProxy[_S] = getattr(instance, self._attr_name)
+        proxy._proxied = Q(value)
+
+
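Together, ``QueryProxy`` and ``ProxyDescriptor`` let ``query`` and ``post_filter`` be used either as callables (each call clones the search and ANDs the new query into the proxied one) or as assignable attributes. A sketch of the two styles, with hypothetical queries::

    from elasticsearch.dsl import Q, Search

    s = Search()
    s = s.query("match", title="python")
    s = s.query("match", body="elasticsearch")   # combined via & into a bool query

    s2 = Search()
    s2.query = Q("match", title="python")        # assignment replaces the proxied query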
+class AggsProxy(AggBase[_R], DslBase):
+    name = "aggs"
+
+    def __init__(self, search: "SearchBase[_R]"):
+        self._base = cast("Agg[_R]", self)
+        self._search = search
+        self._params = {"aggs": {}}
+
+    def to_dict(self) -> Dict[str, Any]:
+        return cast(Dict[str, Any], super().to_dict().get("aggs", {}))
+
+
+class Request(Generic[_R]):
+    def __init__(
+        self,
+        using: AnyUsingType = "default",
+        index: Optional[Union[str, List[str]]] = None,
+        doc_type: Optional[
+            Union[type, str, List[Union[type, str]], Dict[str, Union[type, str]]]
+        ] = None,
+        extra: Optional[Dict[str, Any]] = None,
+    ):
+        self._using = using
+
+        self._index = None
+        if isinstance(index, (tuple, list)):
+            self._index = list(index)
+        elif index:
+            self._index = [index]
+
+        self._doc_type: List[Union[type, str]] = []
+        self._doc_type_map: Dict[str, Any] = {}
+        if isinstance(doc_type, (tuple, list)):
+            self._doc_type.extend(doc_type)
+        elif isinstance(doc_type, collections.abc.Mapping):
+            self._doc_type.extend(doc_type.keys())
+            self._doc_type_map.update(doc_type)
+        elif doc_type:
+            self._doc_type.append(doc_type)
+
+        self._params: Dict[str, Any] = {}
+        self._extra: Dict[str, Any] = extra or {}
+
+    def __eq__(self, other: Any) -> bool:
+        return (
+            isinstance(other, Request)
+            and other._params == self._params
+            and other._index == self._index
+            and other._doc_type == self._doc_type
+            and other.to_dict() == self.to_dict()
+        )
+
+    def __copy__(self) -> Self:
+        return self._clone()
+
+    def params(self, **kwargs: Any) -> Self:
+        """
+        Specify query params to be used when executing the search. All the
+        keyword arguments will override the current values. See
+        https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.search
+        for all available parameters.
+
+        Example::
+
+            s = Search()
+            s = s.params(routing='user-1', preference='local')
+        """
+        s = self._clone()
+        s._params.update(kwargs)
+        return s
+
+    def index(self, *index: Union[str, List[str], Tuple[str, ...]]) -> Self:
+        """
+        Set the index for the search. If called without arguments it removes any previously set index.
+
+        Example::
+
+            s = Search()
+            s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')
+            s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
+        """
+        # .index() resets
+        s = self._clone()
+        if not index:
+            s._index = None
+        else:
+            indexes = []
+            for i in index:
+                if isinstance(i, str):
+                    indexes.append(i)
+                elif isinstance(i, list):
+                    indexes += i
+                elif isinstance(i, tuple):
+                    indexes += list(i)
+
+            s._index = (self._index or []) + indexes
+
+        return s
+
+    def _resolve_field(self, path: str) -> Optional["Field"]:
+        for dt in self._doc_type:
+            if not hasattr(dt, "_index"):
+                continue
+            field = dt._index.resolve_field(path)
+            if field is not None:
+                return cast("Field", field)
+        return None
+
+    def _resolve_nested(
+        self, hit: AttrDict[Any], parent_class: Optional[type] = None
+    ) -> Type[_R]:
+        doc_class = Hit
+
+        nested_path = []
+        nesting = hit["_nested"]
+        while nesting and "field" in nesting:
+            nested_path.append(nesting["field"])
+            nesting = nesting.get("_nested")
+        nested_path_str = ".".join(nested_path)
+
+        nested_field: Optional["Object"]
+        if parent_class is not None and hasattr(parent_class, "_index"):
+            nested_field = cast(
+                Optional["Object"], parent_class._index.resolve_field(nested_path_str)
+            )
+        else:
+            nested_field = cast(
+                Optional["Object"], self._resolve_field(nested_path_str)
+            )
+
+        if nested_field is not None:
+            return cast(Type[_R], nested_field._doc_class)
+
+        return cast(Type[_R], doc_class)
+
+    def _get_result(
+        self, hit: AttrDict[Any], parent_class: Optional[type] = None
+    ) -> _R:
+        doc_class: Any = Hit
+        dt = hit.get("_type")
+
+        if "_nested" in hit:
+            doc_class = self._resolve_nested(hit, parent_class)
+
+        elif dt in self._doc_type_map:
+            doc_class = self._doc_type_map[dt]
+
+        else:
+            for doc_type in self._doc_type:
+                if hasattr(doc_type, "_matches") and doc_type._matches(hit):
+                    doc_class = doc_type
+                    break
+
+        for t in hit.get("inner_hits", ()):
+            hit["inner_hits"][t] = Response[_R](
+                self, hit["inner_hits"][t], doc_class=doc_class
+            )
+
+        callback = getattr(doc_class, "from_es", doc_class)
+        return cast(_R, callback(hit))
+
+    def doc_type(
+        self, *doc_type: Union[type, str], **kwargs: Callable[[AttrDict[Any]], Any]
+    ) -> Self:
+        """
+        Set the type to search through. You can supply a single value or
+        multiple. Values can be strings or subclasses of ``Document``.
+
+        You can also pass in any keyword arguments, mapping a doc_type to a
+        callback that should be used instead of the Hit class.
+
+        If no doc_type is supplied any information stored on the instance will
+        be erased.
+
+        Example:
+
+            s = Search().doc_type('product', 'store', User, custom=my_callback)
+        """
+        # .doc_type() resets
+        s = self._clone()
+        if not doc_type and not kwargs:
+            s._doc_type = []
+            s._doc_type_map = {}
+        else:
+            s._doc_type.extend(doc_type)
+            s._doc_type.extend(kwargs.keys())
+            s._doc_type_map.update(kwargs)
+        return s
+
+    def using(self, client: AnyUsingType) -> Self:
+        """
+        Associate the search request with an Elasticsearch client. A fresh copy
+        is returned, with the current instance remaining unchanged.
+
+        :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
+            an alias to look up in ``elasticsearch.dsl.connections``
+
+        """
+        s = self._clone()
+        s._using = client
+        return s
+
+    def extra(self, **kwargs: Any) -> Self:
+        """
+        Add extra keys to the request body. Mostly here for backwards
+        compatibility.
+        """
+        s = self._clone()
+        if "from_" in kwargs:
+            kwargs["from"] = kwargs.pop("from_")
+        s._extra.update(kwargs)
+        return s
+
+    def _clone(self) -> Self:
+        s = self.__class__(
+            using=self._using, index=self._index, doc_type=self._doc_type
+        )
+        s._doc_type_map = self._doc_type_map.copy()
+        s._extra = self._extra.copy()
+        s._params = self._params.copy()
+        return s
+
+    if TYPE_CHECKING:
+
+        def to_dict(self) -> Dict[str, Any]: ...
+
+
+class SearchBase(Request[_R]):
+    query = ProxyDescriptor[Self]("query")
+    post_filter = ProxyDescriptor[Self]("post_filter")
+    _response: Response[_R]
+
+    def __init__(
+        self,
+        using: AnyUsingType = "default",
+        index: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any,
+    ):
+        """
+        Search request to elasticsearch.
+
+        :arg using: `Elasticsearch` instance to use
+        :arg index: limit the search to index
+
+        All the parameters supplied (or omitted) at creation time can later be
+        overridden by the `using`, `index` and `doc_type` methods respectively.
+        """
+        super().__init__(using=using, index=index, **kwargs)
+
+        self.aggs = AggsProxy[_R](self)
+        self._sort: List[Union[str, Dict[str, Dict[str, str]]]] = []
+        self._knn: List[Dict[str, Any]] = []
+        self._rank: Dict[str, Any] = {}
+        self._collapse: Dict[str, Any] = {}
+        self._source: Optional[Union[bool, List[str], Dict[str, List[str]]]] = None
+        self._highlight: Dict[str, Any] = {}
+        self._highlight_opts: Dict[str, Any] = {}
+        self._suggest: Dict[str, Any] = {}
+        self._script_fields: Dict[str, Any] = {}
+        self._response_class = Response[_R]
+
+        self._query_proxy = QueryProxy(self, "query")
+        self._post_filter_proxy = QueryProxy(self, "post_filter")
+
+    def filter(self, *args: Any, **kwargs: Any) -> Self:
+        """
+        Add a query in filter context.
+        """
+        return self.query(Bool(filter=[Q(*args, **kwargs)]))
+
+    def exclude(self, *args: Any, **kwargs: Any) -> Self:
+        """
+        Add a negative query in filter context.
+        """
+        return self.query(Bool(filter=[~Q(*args, **kwargs)]))
+
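``filter`` and ``exclude`` above wrap the given query in the ``filter`` context of a ``bool`` query, so it restricts results without affecting scoring; ``exclude`` additionally negates it. Illustratively::

    s = Search().query("match", title="python")
    s = s.filter("term", published=True)      # term query goes into bool.filter
    s = s.exclude("term", category="draft")   # adds {"bool": {"must_not": [...]}} to bool.filter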
+    def __getitem__(self, n: Union[int, slice]) -> Self:
+        """
+        Support slicing the `Search` instance for pagination.
+
+        Slicing equates to the from/size parameters. E.g.::
+
+            s = Search().query(...)[0:25]
+
+        is equivalent to::
+
+            s = Search().query(...).extra(from_=0, size=25)
+
+        """
+        s = self._clone()
+
+        if isinstance(n, slice):
+            # If negative slicing, abort.
+            if n.start and n.start < 0 or n.stop and n.stop < 0:
+                raise ValueError("Search does not support negative slicing.")
+            slice_start = n.start
+            slice_stop = n.stop
+        else:  # This is an index lookup, equivalent to slicing by [n:n+1].
+            # If negative index, abort.
+            if n < 0:
+                raise ValueError("Search does not support negative indexing.")
+            slice_start = n
+            slice_stop = n + 1
+
+        old_from = s._extra.get("from")
+        old_to = None
+        if "size" in s._extra:
+            old_to = (old_from or 0) + s._extra["size"]
+
+        new_from = old_from
+        if slice_start is not None:
+            new_from = (old_from or 0) + slice_start
+        new_to = old_to
+        if slice_stop is not None:
+            new_to = (old_from or 0) + slice_stop
+            if old_to is not None and old_to < new_to:
+                new_to = old_to
+
+        if new_from is not None:
+            s._extra["from"] = new_from
+        if new_to is not None:
+            s._extra["size"] = max(0, new_to - (new_from or 0))
+        return s
+
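The slicing logic above maps Python slices onto ``from``/``size`` and clamps a second slice to the window established by an earlier one. A small worked sketch::

    s = Search()[10:30]    # from=10, size=20
    s = s[5:15]            # applied within the previous window: from=15, size=10
    s = Search()[3]        # single index lookup: from=3, size=1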
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> Self:
+        """
+        Construct a new `Search` instance from a raw dict containing the search
+        body. Useful when migrating from raw dictionaries.
+
+        Example::
+
+            s = Search.from_dict({
+                "query": {
+                    "bool": {
+                        "must": [...]
+                    }
+                },
+                "aggs": {...}
+            })
+            s = s.filter('term', published=True)
+        """
+        s = cls()
+        s.update_from_dict(d)
+        return s
+
+    def _clone(self) -> Self:
+        """
+        Return a clone of the current search request. Performs a shallow copy
+        of all the underlying objects. Used internally by most state modifying
+        APIs.
+        """
+        s = super()._clone()
+
+        s._response_class = self._response_class
+        s._knn = [knn.copy() for knn in self._knn]
+        s._rank = self._rank.copy()
+        s._collapse = self._collapse.copy()
+        s._sort = self._sort[:]
+        s._source = copy.copy(self._source) if self._source is not None else None
+        s._highlight = self._highlight.copy()
+        s._highlight_opts = self._highlight_opts.copy()
+        s._suggest = self._suggest.copy()
+        s._script_fields = self._script_fields.copy()
+        for x in ("query", "post_filter"):
+            getattr(s, x)._proxied = getattr(self, x)._proxied
+
+        # copy top-level bucket definitions
+        if self.aggs._params.get("aggs"):
+            s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
+        return s
+
+    def response_class(self, cls: Type[Response[_R]]) -> Self:
+        """
+        Override the default wrapper used for the response.
+        """
+        s = self._clone()
+        s._response_class = cls
+        return s
+
+    def update_from_dict(self, d: Dict[str, Any]) -> Self:
+        """
+        Apply options from a serialized body to the current instance. Modifies
+        the object in-place. Used mostly by ``from_dict``.
+        """
+        d = d.copy()
+        if "query" in d:
+            self.query._proxied = Q(d.pop("query"))
+        if "post_filter" in d:
+            self.post_filter._proxied = Q(d.pop("post_filter"))
+
+        aggs = d.pop("aggs", d.pop("aggregations", {}))
+        if aggs:
+            self.aggs._params = {
+                "aggs": {name: A(value) for (name, value) in aggs.items()}
+            }
+        if "knn" in d:
+            self._knn = d.pop("knn")
+            if isinstance(self._knn, dict):
+                self._knn = [self._knn]
+        if "rank" in d:
+            self._rank = d.pop("rank")
+        if "collapse" in d:
+            self._collapse = d.pop("collapse")
+        if "sort" in d:
+            self._sort = d.pop("sort")
+        if "_source" in d:
+            self._source = d.pop("_source")
+        if "highlight" in d:
+            high = d.pop("highlight").copy()
+            self._highlight = high.pop("fields")
+            self._highlight_opts = high
+        if "suggest" in d:
+            self._suggest = d.pop("suggest")
+            if "text" in self._suggest:
+                text = self._suggest.pop("text")
+                for s in self._suggest.values():
+                    s.setdefault("text", text)
+        if "script_fields" in d:
+            self._script_fields = d.pop("script_fields")
+        self._extra.update(d)
+        return self
+
+    def script_fields(self, **kwargs: Any) -> Self:
+        """
+        Define script fields to be calculated on hits. See
+        https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html
+        for more details.
+
+        Example::
+
+            s = Search()
+            s = s.script_fields(times_two="doc['field'].value * 2")
+            s = s.script_fields(
+                times_three={
+                    'script': {
+                        'lang': 'painless',
+                        'source': "doc['field'].value * params.n",
+                        'params': {'n': 3}
+                    }
+                }
+            )
+
+        """
+        s = self._clone()
+        for name in kwargs:
+            if isinstance(kwargs[name], str):
+                kwargs[name] = {"script": kwargs[name]}
+        s._script_fields.update(kwargs)
+        return s
+
+    def knn(
+        self,
+        field: Union[str, "InstrumentedField"],
+        k: int,
+        num_candidates: int,
+        query_vector: Optional[List[float]] = None,
+        query_vector_builder: Optional[Dict[str, Any]] = None,
+        boost: Optional[float] = None,
+        filter: Optional[Query] = None,
+        similarity: Optional[float] = None,
+        inner_hits: Optional[Dict[str, Any]] = None,
+    ) -> Self:
+        """
+        Add a k-nearest neighbor (kNN) search.
+
+        :arg field: the vector field to search against as a string or document class attribute
+        :arg k: number of nearest neighbors to return as top hits
+        :arg num_candidates: number of nearest neighbor candidates to consider per shard
+        :arg query_vector: the vector to search for
+        :arg query_vector_builder: A dictionary indicating how to build a query vector
+        :arg boost: A floating-point boost factor for kNN scores
+        :arg filter: query to filter the documents that can match
+        :arg similarity: the minimum similarity required for a document to be considered a match, as a float value
+        :arg inner_hits: retrieve hits from nested field
+
+        Example::
+
+            s = Search()
+            s = s.knn(field='embedding', k=5, num_candidates=10, query_vector=vector,
+                      filter=Q('term', category='blog'))
+        """
+        s = self._clone()
+        s._knn.append(
+            {
+                "field": str(field),  # str() is for InstrumentedField instances
+                "k": k,
+                "num_candidates": num_candidates,
+            }
+        )
+        if query_vector is None and query_vector_builder is None:
+            raise ValueError("one of query_vector and query_vector_builder is required")
+        if query_vector is not None and query_vector_builder is not None:
+            raise ValueError(
+                "only one of query_vector and query_vector_builder must be given"
+            )
+        if query_vector is not None:
+            s._knn[-1]["query_vector"] = cast(Any, query_vector)
+        if query_vector_builder is not None:
+            s._knn[-1]["query_vector_builder"] = query_vector_builder
+        if boost is not None:
+            s._knn[-1]["boost"] = boost
+        if filter is not None:
+            if isinstance(filter, Query):
+                s._knn[-1]["filter"] = filter.to_dict()
+            else:
+                s._knn[-1]["filter"] = filter
+        if similarity is not None:
+            s._knn[-1]["similarity"] = similarity
+        if inner_hits is not None:
+            s._knn[-1]["inner_hits"] = inner_hits
+        return s
+
+    def rank(self, rrf: Optional[Union[bool, Dict[str, Any]]] = None) -> Self:
+        """
+        Defines a method for combining and ranking result sets from a combination
+        of searches. Requires a minimum of 2 result sets.
+
+        :arg rrf: Set to ``True`` or an options dictionary to set the rank method to reciprocal rank fusion (RRF).
+
+        Example::
+
+            s = Search()
+            s = s.query('match', content='search text')
+            s = s.knn(field='embedding', k=5, num_candidates=10, query_vector=vector)
+            s = s.rank(rrf=True)
+
+        Note: This option is in technical preview and may change in the future. The syntax will likely change before GA.
+        """
+        s = self._clone()
+        s._rank = {}
+        if rrf is not None and rrf is not False:
+            s._rank["rrf"] = {} if rrf is True else rrf
+        return s
+
+    def source(
+        self,
+        fields: Optional[
+            Union[
+                bool,
+                str,
+                "InstrumentedField",
+                List[Union[str, "InstrumentedField"]],
+                Dict[str, List[Union[str, "InstrumentedField"]]],
+            ]
+        ] = None,
+        **kwargs: Any,
+    ) -> Self:
+        """
+        Selectively control how the _source field is returned.
+
+        :arg fields: field name, wildcard string, list of field names or wildcards,
+                     or dictionary of includes and excludes
+        :arg kwargs: ``includes`` or ``excludes`` arguments, when ``fields`` is ``None``.
+
+        When no arguments are given, the entire document will be returned for
+        each hit.  If ``fields`` is a string or list of strings, the field names or field
+        wildcards given will be included. If ``fields`` is a dictionary with keys of
+        'includes' and/or 'excludes' the fields will be either included or excluded
+        appropriately.
+
+        Calling this multiple times with the same named parameter will override the
+        previous values with the new ones.
+
+        Example::
+
+            s = Search()
+            s = s.source(includes=['obj1.*'], excludes=["*.description"])
+
+            s = Search()
+            s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
+
+        """
+        s = self._clone()
+
+        if fields and kwargs:
+            raise ValueError("You cannot specify fields and kwargs at the same time.")
+
+        @overload
+        def ensure_strings(fields: str) -> str: ...
+
+        @overload
+        def ensure_strings(fields: "InstrumentedField") -> str: ...
+
+        @overload
+        def ensure_strings(
+            fields: List[Union[str, "InstrumentedField"]],
+        ) -> List[str]: ...
+
+        @overload
+        def ensure_strings(
+            fields: Dict[str, List[Union[str, "InstrumentedField"]]],
+        ) -> Dict[str, List[str]]: ...
+
+        def ensure_strings(
+            fields: Union[
+                bool,
+                str,
+                "InstrumentedField",
+                List[Union[str, "InstrumentedField"]],
+                Dict[str, List[Union[str, "InstrumentedField"]]],
+            ],
+        ) -> Union[bool, str, List[str], Dict[str, List[str]]]:
+            if isinstance(fields, dict):
+                return {k: ensure_strings(v) for k, v in fields.items()}
+            elif isinstance(fields, bool):
+                # boolean settings should stay the way they are
+                return fields
+            elif not isinstance(fields, (str, InstrumentedField)):
+                # we assume that if `fields` is not any of [dict, str,
+                # InstrumentedField] then it is an iterable of strings or
+                # InstrumentedFields, so we convert them to a plain list of
+                # strings
+                return [str(f) for f in fields]
+            else:
+                return str(fields)
+
+        if fields is not None:
+            s._source = fields if isinstance(fields, bool) else ensure_strings(fields)  # type: ignore[assignment]
+            return s
+
+        if kwargs and not isinstance(s._source, dict):
+            s._source = {}
+
+        if isinstance(s._source, dict):
+            for key, value in kwargs.items():
+                if value is None:
+                    try:
+                        del s._source[key]
+                    except KeyError:
+                        pass
+                else:
+                    s._source[key] = ensure_strings(value)
+
+        return s
+
+    def sort(
+        self, *keys: Union[str, "InstrumentedField", Dict[str, Dict[str, str]]]
+    ) -> Self:
+        """
+        Add sorting information to the search request. If called without
+        arguments it will remove all sort requirements. Otherwise it will
+        replace them. Acceptable arguments are::
+
+            'some.field'
+            '-some.other.field'
+            {'different.field': {'any': 'dict'}}
+
+        so for example::
+
+            s = Search().sort(
+                'category',
+                '-title',
+                {"price" : {"order" : "asc", "mode" : "avg"}}
+            )
+
+        will sort by ``category``, ``title`` (in descending order) and
+        ``price`` in ascending order using the ``avg`` mode.
+
+        The API returns a copy of the Search object and can thus be chained.
+        """
+        s = self._clone()
+        s._sort = []
+        for k in keys:
+            if not isinstance(k, dict):
+                sort_field = str(k)
+                if sort_field.startswith("-"):
+                    if sort_field[1:] == "_score":
+                        raise IllegalOperation("Sorting by `-_score` is not allowed.")
+                    s._sort.append({sort_field[1:]: {"order": "desc"}})
+                else:
+                    s._sort.append(sort_field)
+            else:
+                s._sort.append(k)
+        return s
+
+    def collapse(
+        self,
+        field: Optional[Union[str, "InstrumentedField"]] = None,
+        inner_hits: Optional[Dict[str, Any]] = None,
+        max_concurrent_group_searches: Optional[int] = None,
+    ) -> Self:
+        """
+        Add collapsing information to the search request.
+        If called without providing ``field``, it will remove all collapse
+        requirements, otherwise it will replace them with the provided
+        arguments.
+        The API returns a copy of the Search object and can thus be chained.
+        """
+        s = self._clone()
+        s._collapse = {}
+
+        if field is None:
+            return s
+
+        s._collapse["field"] = str(field)
+        if inner_hits:
+            s._collapse["inner_hits"] = inner_hits
+        if max_concurrent_group_searches:
+            s._collapse["max_concurrent_group_searches"] = max_concurrent_group_searches
+        return s
+
+    def highlight_options(self, **kwargs: Any) -> Self:
+        """
+        Update the global highlighting options used for this request. For
+        example::
+
+            s = Search()
+            s = s.highlight_options(order='score')
+        """
+        s = self._clone()
+        s._highlight_opts.update(kwargs)
+        return s
+
+    def highlight(
+        self, *fields: Union[str, "InstrumentedField"], **kwargs: Any
+    ) -> Self:
+        """
+        Request highlighting of some fields. All keyword arguments passed in will be
+        used as parameters for all the fields in the ``fields`` parameter. Example::
+
+            Search().highlight('title', 'body', fragment_size=50)
+
+        will produce the equivalent of::
+
+            {
+                "highlight": {
+                    "fields": {
+                        "body": {"fragment_size": 50},
+                        "title": {"fragment_size": 50}
+                    }
+                }
+            }
+
+        If you want to have different options for different fields
+        you can call ``highlight`` twice::
+
+            Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
+
+        which will produce::
+
+            {
+                "highlight": {
+                    "fields": {
+                        "body": {"fragment_size": 100},
+                        "title": {"fragment_size": 50}
+                    }
+                }
+            }
+
+        """
+        s = self._clone()
+        for f in fields:
+            s._highlight[str(f)] = kwargs
+        return s
+
+    def suggest(
+        self,
+        name: str,
+        text: Optional[str] = None,
+        regex: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Self:
+        """
+        Add a suggestions request to the search.
+
+        :arg name: name of the suggestion
+        :arg text: text to suggest on
+
+        All keyword arguments will be added to the suggestions body. For example::
+
+            s = Search()
+            s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
+
+            # regex query for Completion Suggester
+            s = Search()
+            s = s.suggest('suggestion-1', regex='py[thon|py]', completion={'field': 'body'})
+        """
+        if text is None and regex is None:
+            raise ValueError('You have to pass "text" or "regex" argument.')
+        if text and regex:
+            raise ValueError('You can only pass either "text" or "regex" argument.')
+        if regex and "completion" not in kwargs:
+            raise ValueError(
+                '"regex" argument must be passed with "completion" keyword argument.'
+            )
+
+        s = self._clone()
+        if regex:
+            s._suggest[name] = {"regex": regex}
+        elif text:
+            if "completion" in kwargs:
+                s._suggest[name] = {"prefix": text}
+            else:
+                s._suggest[name] = {"text": text}
+        s._suggest[name].update(kwargs)
+        return s
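+
+    # A sketch of the bodies produced by the two call styles shown in the
+    # docstring above, with hypothetical suggestion names and fields:
+    #
+    #     Search().suggest("s1", "pythn", term={"field": "title"}).to_dict()["suggest"]
+    #     # -> {"s1": {"text": "pythn", "term": {"field": "title"}}}
+    #
+    #     Search().suggest("s2", regex="py[thon|py]", completion={"field": "title"}).to_dict()["suggest"]
+    #     # -> {"s2": {"regex": "py[thon|py]", "completion": {"field": "title"}}}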
+
+    def search_after(self) -> Self:
+        """
+        Return a ``Search`` instance that retrieves the next page of results.
+
+        This method provides an easy way to paginate a long list of results using
+        the ``search_after`` option. For example::
+
+            page_size = 20
+            s = Search()[:page_size].sort("date")
+
+            while True:
+                # get a page of results
+                r = await s.execute()
+
+                # do something with this page of results
+
+                # exit the loop if we reached the end
+                if len(r.hits) < page_size:
+                    break
+
+                # get a search object with the next page of results
+                s = s.search_after()
+
+        Note that the ``search_after`` option requires the search to have an
+        explicit ``sort`` order.
+        """
+        if not hasattr(self, "_response"):
+            raise ValueError("A search must be executed before using search_after")
+        return cast(Self, self._response.search_after())
+
+    def to_dict(self, count: bool = False, **kwargs: Any) -> Dict[str, Any]:
+        """
+        Serialize the search into the dictionary that will be sent over as the
+        request's body.
+
+        :arg count: a flag to specify if we are interested in a body for count -
+            no aggregations, no pagination bounds etc.
+
+        All additional keyword arguments will be included into the dictionary.
+        """
+        d = {}
+
+        if self.query:
+            d["query"] = recursive_to_dict(self.query)
+
+        if self._knn:
+            if len(self._knn) == 1:
+                d["knn"] = self._knn[0]
+            else:
+                d["knn"] = self._knn
+
+        if self._rank:
+            d["rank"] = self._rank
+
+        # count request doesn't care for sorting and other things
+        if not count:
+            if self.post_filter:
+                d["post_filter"] = recursive_to_dict(self.post_filter.to_dict())
+
+            if self.aggs.aggs:
+                d.update(recursive_to_dict(self.aggs.to_dict()))
+
+            if self._sort:
+                d["sort"] = self._sort
+
+            if self._collapse:
+                d["collapse"] = self._collapse
+
+            d.update(recursive_to_dict(self._extra))
+
+            if self._source not in (None, {}):
+                d["_source"] = self._source
+
+            if self._highlight:
+                d["highlight"] = {"fields": self._highlight}
+                d["highlight"].update(self._highlight_opts)
+
+            if self._suggest:
+                d["suggest"] = self._suggest
+
+            if self._script_fields:
+                d["script_fields"] = self._script_fields
+
+        d.update(recursive_to_dict(kwargs))
+        return d
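+
+    # A small sketch of a composed body (hypothetical field names), assuming
+    # the usual `Search` subclass of this base:
+    #
+    #     s = Search().query("match", title="python").sort("-publish_date")
+    #     s.to_dict()
+    #     # -> {"query": {"match": {"title": "python"}},
+    #     #     "sort": [{"publish_date": {"order": "desc"}}]}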
+
+
+class MultiSearchBase(Request[_R]):
+    """
+    Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single
+    request.
+    """
+
+    def __init__(self, **kwargs: Any):
+        super().__init__(**kwargs)
+        self._searches: List[SearchBase[_R]] = []
+
+    def __getitem__(self, key: Union[int, slice]) -> Any:
+        return self._searches[key]
+
+    def __iter__(self) -> Iterator[SearchBase[_R]]:
+        return iter(self._searches)
+
+    def _clone(self) -> Self:
+        ms = super()._clone()
+        ms._searches = self._searches[:]
+        return ms
+
+    def add(self, search: SearchBase[_R]) -> Self:
+        """
+        Adds a new :class:`~elasticsearch.dsl.Search` object to the request::
+
+            ms = MultiSearch(index='my-index')
+            ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
+            ms = ms.add(Search(doc_type=Blog))
+        """
+        ms = self._clone()
+        ms._searches.append(search)
+        return ms
+
+    def to_dict(self) -> List[Dict[str, Any]]:  # type: ignore[override]
+        out: List[Dict[str, Any]] = []
+        for s in self._searches:
+            meta: Dict[str, Any] = {}
+            if s._index:
+                meta["index"] = cast(Any, s._index)
+            meta.update(s._params)
+
+            out.append(meta)
+            out.append(s.to_dict())
+
+        return out
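+
+    # A sketch of the flattened header/body list produced above, which is the
+    # shape expected by the msearch API (hypothetical index and field names):
+    #
+    #     ms = MultiSearch()
+    #     ms = ms.add(Search(index="blogs").query("match", title="python"))
+    #     ms = ms.add(Search(index="authors"))
+    #     ms.to_dict()
+    #     # -> roughly: [{"index": ["blogs"]}, {"query": {"match": {"title": "python"}}},
+    #     #              {"index": ["authors"]}, {}]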
diff -pruN 8.17.2-2/elasticsearch/dsl/serializer.py 9.2.0-1/elasticsearch/dsl/serializer.py
--- 8.17.2-2/elasticsearch/dsl/serializer.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/serializer.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,34 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any
+
+from elasticsearch.serializer import JSONSerializer
+
+from .utils import AttrList
+
+
+class AttrJSONSerializer(JSONSerializer):
+    def default(self, data: Any) -> Any:
+        if isinstance(data, AttrList):
+            return data._l_
+        if hasattr(data, "to_dict"):
+            return data.to_dict()
+        return super().default(data)
+
+
+serializer = AttrJSONSerializer()
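+
+# A brief sketch of what this serializer adds on top of the stock
+# JSONSerializer (the names below are hypothetical):
+#
+#     serializer.dumps({"tags": some_attr_list})  # AttrList -> plain JSON list
+#     serializer.dumps({"doc": some_document})    # objects with to_dict() are
+#                                                 # serialized via that method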
diff -pruN 8.17.2-2/elasticsearch/dsl/types.py 9.2.0-1/elasticsearch/dsl/types.py
--- 8.17.2-2/elasticsearch/dsl/types.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/types.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,6926 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, Dict, Literal, Mapping, Sequence, Union
+
+from elastic_transport.client_utils import DEFAULT, DefaultType
+
+from . import Query
+from .document_base import InstrumentedField
+from .utils import AttrDict
+
+PipeSeparatedFlags = str
+
+
+class BucketCorrelationFunction(AttrDict[Any]):
+    """
+    :arg count_correlation: (required) The configuration to calculate a
+        count correlation. This function is designed for determining the
+        correlation of a term value and a given metric.
+    """
+
+    count_correlation: Union[
+        "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType
+    ]
+
+    def __init__(
+        self,
+        *,
+        count_correlation: Union[
+            "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if count_correlation is not DEFAULT:
+            kwargs["count_correlation"] = count_correlation
+        super().__init__(kwargs)
+
+
+class BucketCorrelationFunctionCountCorrelation(AttrDict[Any]):
+    """
+    :arg indicator: (required) The indicator with which to correlate the
+        configured `bucket_path` values.
+    """
+
+    indicator: Union[
+        "BucketCorrelationFunctionCountCorrelationIndicator",
+        Dict[str, Any],
+        DefaultType,
+    ]
+
+    def __init__(
+        self,
+        *,
+        indicator: Union[
+            "BucketCorrelationFunctionCountCorrelationIndicator",
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if indicator is not DEFAULT:
+            kwargs["indicator"] = indicator
+        super().__init__(kwargs)
+
+
+class BucketCorrelationFunctionCountCorrelationIndicator(AttrDict[Any]):
+    """
+    :arg doc_count: (required) The total number of documents that
+        initially created the expectations. It’s required to be greater
+        than or equal to the sum of all values in the buckets_path as this
+        is the originating superset of data to which the term values are
+        correlated.
+    :arg expectations: (required) An array of numbers with which to
+        correlate the configured `bucket_path` values. The length of this
+        value must always equal the number of buckets returned by the
+        `bucket_path`.
+    :arg fractions: An array of fractions to use when averaging and
+        calculating variance. This should be used if the pre-calculated
+        data and the buckets_path have known gaps. The length of
+        fractions, if provided, must equal expectations.
+    """
+
+    doc_count: Union[int, DefaultType]
+    expectations: Union[Sequence[float], DefaultType]
+    fractions: Union[Sequence[float], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        doc_count: Union[int, DefaultType] = DEFAULT,
+        expectations: Union[Sequence[float], DefaultType] = DEFAULT,
+        fractions: Union[Sequence[float], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if doc_count is not DEFAULT:
+            kwargs["doc_count"] = doc_count
+        if expectations is not DEFAULT:
+            kwargs["expectations"] = expectations
+        if fractions is not DEFAULT:
+            kwargs["fractions"] = fractions
+        super().__init__(kwargs)
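+
+# A composition sketch for the three classes above; the numbers are made up
+# for illustration:
+#
+#     BucketCorrelationFunction(
+#         count_correlation=BucketCorrelationFunctionCountCorrelation(
+#             indicator=BucketCorrelationFunctionCountCorrelationIndicator(
+#                 doc_count=1000,
+#                 expectations=[10.0, 20.0, 30.0],
+#             )
+#         )
+#     )
+#
+# Each argument also accepts an equivalent plain dict, per the
+# Union[..., Dict[str, Any], DefaultType] annotations.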
+
+
+class ChiSquareHeuristic(AttrDict[Any]):
+    """
+    :arg background_is_superset: (required) Set to `false` if you defined
+        a custom background filter that represents a different set of
+        documents that you want to compare to.
+    :arg include_negatives: (required) Set to `false` to filter out the
+        terms that appear less often in the subset than in documents
+        outside the subset.
+    """
+
+    background_is_superset: Union[bool, DefaultType]
+    include_negatives: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        background_is_superset: Union[bool, DefaultType] = DEFAULT,
+        include_negatives: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if background_is_superset is not DEFAULT:
+            kwargs["background_is_superset"] = background_is_superset
+        if include_negatives is not DEFAULT:
+            kwargs["include_negatives"] = include_negatives
+        super().__init__(kwargs)
+
+
+class ChunkingSettings(AttrDict[Any]):
+    """
+    :arg strategy: (required) The chunking strategy: `sentence`, `word`,
+        `none` or `recursive`. If `strategy` is set to `recursive`, you
+        must also specify `max_chunk_size` and either `separators` or
+        `separator_group`. Learn more about different chunking
+        strategies in the linked documentation. Defaults to `sentence` if
+        omitted.
+    :arg max_chunk_size: (required) The maximum size of a chunk in words.
+        This value cannot be lower than `20` (for `sentence` strategy) or
+        `10` (for `word` strategy). This value should not exceed the
+        window size for the associated model. Defaults to `250` if
+        omitted.
+    :arg separator_group: Only applicable to the `recursive` strategy and
+        required when using it.  Sets a predefined list of separators in
+        the saved chunking settings based on the selected text type.
+        Values can be `markdown` or `plaintext`.  Using this parameter is
+        an alternative to manually specifying a custom `separators` list.
+    :arg separators: Only applicable to the `recursive` strategy and
+        required when using it.  A list of strings used as possible split
+        points when chunking text.  Each string can be a plain string or a
+        regular expression (regex) pattern. The system tries each
+        separator in order to split the text, starting from the first item
+        in the list.  After splitting, it attempts to recombine smaller
+        pieces into larger chunks that stay within the `max_chunk_size`
+        limit, to reduce the total number of chunks generated.
+    :arg overlap: The number of overlapping words for chunks. It is
+        applicable only to a `word` chunking strategy. This value cannot
+        be higher than half the `max_chunk_size` value. Defaults to `100`
+        if omitted.
+    :arg sentence_overlap: The number of overlapping sentences for chunks.
+        It is applicable only for a `sentence` chunking strategy. It can
+        be either `1` or `0`. Defaults to `1` if omitted.
+    """
+
+    strategy: Union[str, DefaultType]
+    max_chunk_size: Union[int, DefaultType]
+    separator_group: Union[str, DefaultType]
+    separators: Union[Sequence[str], DefaultType]
+    overlap: Union[int, DefaultType]
+    sentence_overlap: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        strategy: Union[str, DefaultType] = DEFAULT,
+        max_chunk_size: Union[int, DefaultType] = DEFAULT,
+        separator_group: Union[str, DefaultType] = DEFAULT,
+        separators: Union[Sequence[str], DefaultType] = DEFAULT,
+        overlap: Union[int, DefaultType] = DEFAULT,
+        sentence_overlap: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if strategy is not DEFAULT:
+            kwargs["strategy"] = strategy
+        if max_chunk_size is not DEFAULT:
+            kwargs["max_chunk_size"] = max_chunk_size
+        if separator_group is not DEFAULT:
+            kwargs["separator_group"] = separator_group
+        if separators is not DEFAULT:
+            kwargs["separators"] = separators
+        if overlap is not DEFAULT:
+            kwargs["overlap"] = overlap
+        if sentence_overlap is not DEFAULT:
+            kwargs["sentence_overlap"] = sentence_overlap
+        super().__init__(kwargs)
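+
+# A construction sketch; the values are arbitrary but consistent with the
+# constraints documented above:
+#
+#     ChunkingSettings(strategy="sentence", max_chunk_size=250, sentence_overlap=1)
+#     ChunkingSettings(strategy="recursive", max_chunk_size=200,
+#                      separator_group="markdown")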
+
+
+class ClassificationInferenceOptions(AttrDict[Any]):
+    """
+    :arg num_top_classes: Specifies the number of top class predictions to
+        return. Defaults to 0.
+    :arg num_top_feature_importance_values: Specifies the maximum number
+        of feature importance values per document.
+    :arg prediction_field_type: Specifies the type of the predicted field
+        to write. Acceptable values are: string, number, boolean. When
+        boolean is provided 1.0 is transformed to true and 0.0 to false.
+    :arg results_field: The field that is added to incoming documents to
+        contain the inference prediction. Defaults to predicted_value.
+    :arg top_classes_results_field: Specifies the field to which the top
+        classes are written. Defaults to top_classes.
+    """
+
+    num_top_classes: Union[int, DefaultType]
+    num_top_feature_importance_values: Union[int, DefaultType]
+    prediction_field_type: Union[str, DefaultType]
+    results_field: Union[str, DefaultType]
+    top_classes_results_field: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        num_top_classes: Union[int, DefaultType] = DEFAULT,
+        num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
+        prediction_field_type: Union[str, DefaultType] = DEFAULT,
+        results_field: Union[str, DefaultType] = DEFAULT,
+        top_classes_results_field: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if num_top_classes is not DEFAULT:
+            kwargs["num_top_classes"] = num_top_classes
+        if num_top_feature_importance_values is not DEFAULT:
+            kwargs["num_top_feature_importance_values"] = (
+                num_top_feature_importance_values
+            )
+        if prediction_field_type is not DEFAULT:
+            kwargs["prediction_field_type"] = prediction_field_type
+        if results_field is not DEFAULT:
+            kwargs["results_field"] = results_field
+        if top_classes_results_field is not DEFAULT:
+            kwargs["top_classes_results_field"] = top_classes_results_field
+        super().__init__(kwargs)
+
+
+class CommonTermsQuery(AttrDict[Any]):
+    """
+    :arg query: (required)
+    :arg analyzer:
+    :arg cutoff_frequency:
+    :arg high_freq_operator:
+    :arg low_freq_operator:
+    :arg minimum_should_match:
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    query: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    cutoff_frequency: Union[float, DefaultType]
+    high_freq_operator: Union[Literal["and", "or"], DefaultType]
+    low_freq_operator: Union[Literal["and", "or"], DefaultType]
+    minimum_should_match: Union[int, str, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        cutoff_frequency: Union[float, DefaultType] = DEFAULT,
+        high_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+        low_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+        minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if cutoff_frequency is not DEFAULT:
+            kwargs["cutoff_frequency"] = cutoff_frequency
+        if high_freq_operator is not DEFAULT:
+            kwargs["high_freq_operator"] = high_freq_operator
+        if low_freq_operator is not DEFAULT:
+            kwargs["low_freq_operator"] = low_freq_operator
+        if minimum_should_match is not DEFAULT:
+            kwargs["minimum_should_match"] = minimum_should_match
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class CoordsGeoBounds(AttrDict[Any]):
+    """
+    :arg top: (required)
+    :arg bottom: (required)
+    :arg left: (required)
+    :arg right: (required)
+    """
+
+    top: Union[float, DefaultType]
+    bottom: Union[float, DefaultType]
+    left: Union[float, DefaultType]
+    right: Union[float, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        top: Union[float, DefaultType] = DEFAULT,
+        bottom: Union[float, DefaultType] = DEFAULT,
+        left: Union[float, DefaultType] = DEFAULT,
+        right: Union[float, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if top is not DEFAULT:
+            kwargs["top"] = top
+        if bottom is not DEFAULT:
+            kwargs["bottom"] = bottom
+        if left is not DEFAULT:
+            kwargs["left"] = left
+        if right is not DEFAULT:
+            kwargs["right"] = right
+        super().__init__(kwargs)
+
+
+class CustomCategorizeTextAnalyzer(AttrDict[Any]):
+    """
+    :arg char_filter:
+    :arg tokenizer:
+    :arg filter:
+    """
+
+    char_filter: Union[Sequence[str], DefaultType]
+    tokenizer: Union[str, DefaultType]
+    filter: Union[Sequence[str], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        char_filter: Union[Sequence[str], DefaultType] = DEFAULT,
+        tokenizer: Union[str, DefaultType] = DEFAULT,
+        filter: Union[Sequence[str], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if char_filter is not DEFAULT:
+            kwargs["char_filter"] = char_filter
+        if tokenizer is not DEFAULT:
+            kwargs["tokenizer"] = tokenizer
+        if filter is not DEFAULT:
+            kwargs["filter"] = filter
+        super().__init__(kwargs)
+
+
+class DenseVectorIndexOptions(AttrDict[Any]):
+    """
+    :arg type: (required) The type of kNN algorithm to use.
+    :arg confidence_interval: The confidence interval to use when
+        quantizing the vectors. Can be any value between and including
+        `0.90` and `1.0` or exactly `0`. When the value is `0`, this
+        indicates that dynamic quantiles should be calculated for
+        optimized quantization. When between `0.90` and `1.0`, this value
+        restricts the values used when calculating the quantization
+        thresholds.  For example, a value of `0.95` will only use the
+        middle `95%` of the values when calculating the quantization
+        thresholds (e.g. the highest and lowest `2.5%` of values will be
+        ignored).  Defaults to `1/(dims + 1)` for `int8` quantized vectors
+        and `0` for `int4` for dynamic quantile calculation.  Only
+        applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and
+        `int4_flat` index types.
+    :arg ef_construction: The number of candidates to track while
+        assembling the list of nearest neighbors for each new node.  Only
+        applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw`
+        index types. Defaults to `100` if omitted.
+    :arg m: The number of neighbors each node will be connected to in the
+        HNSW graph.  Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`,
+        and `int4_hnsw` index types. Defaults to `16` if omitted.
+    :arg rescore_vector: The rescore vector options. This is only
+        applicable to `bbq_disk`, `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`,
+        `bbq_flat`, `int4_flat`, and `int8_flat` index types.
+    :arg on_disk_rescore: `true` if vector rescoring should be done
+        on-disk. Only applicable to `bbq_hnsw`.
+    """
+
+    type: Union[
+        Literal[
+            "bbq_flat",
+            "bbq_hnsw",
+            "bbq_disk",
+            "flat",
+            "hnsw",
+            "int4_flat",
+            "int4_hnsw",
+            "int8_flat",
+            "int8_hnsw",
+        ],
+        DefaultType,
+    ]
+    confidence_interval: Union[float, DefaultType]
+    ef_construction: Union[int, DefaultType]
+    m: Union[int, DefaultType]
+    rescore_vector: Union[
+        "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType
+    ]
+    on_disk_rescore: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        type: Union[
+            Literal[
+                "bbq_flat",
+                "bbq_hnsw",
+                "bbq_disk",
+                "flat",
+                "hnsw",
+                "int4_flat",
+                "int4_hnsw",
+                "int8_flat",
+                "int8_hnsw",
+            ],
+            DefaultType,
+        ] = DEFAULT,
+        confidence_interval: Union[float, DefaultType] = DEFAULT,
+        ef_construction: Union[int, DefaultType] = DEFAULT,
+        m: Union[int, DefaultType] = DEFAULT,
+        rescore_vector: Union[
+            "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        on_disk_rescore: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if type is not DEFAULT:
+            kwargs["type"] = type
+        if confidence_interval is not DEFAULT:
+            kwargs["confidence_interval"] = confidence_interval
+        if ef_construction is not DEFAULT:
+            kwargs["ef_construction"] = ef_construction
+        if m is not DEFAULT:
+            kwargs["m"] = m
+        if rescore_vector is not DEFAULT:
+            kwargs["rescore_vector"] = rescore_vector
+        if on_disk_rescore is not DEFAULT:
+            kwargs["on_disk_rescore"] = on_disk_rescore
+        super().__init__(kwargs)
+
+
+class DenseVectorIndexOptionsRescoreVector(AttrDict[Any]):
+    """
+    :arg oversample: (required) The oversampling factor to use when
+        searching for the nearest neighbor. This is only applicable to the
+        quantized formats: `bbq_*`, `int4_*`, and `int8_*`. When provided,
+        `oversample * k` vectors will be gathered and then their scores
+        will be re-computed with the original vectors. Valid values are
+        between `1.0` and `10.0` (inclusive), or `0` exactly to disable
+        oversampling.
+    """
+
+    oversample: Union[float, DefaultType]
+
+    def __init__(
+        self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any
+    ):
+        if oversample is not DEFAULT:
+            kwargs["oversample"] = oversample
+        super().__init__(kwargs)
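+
+# A construction sketch for the two classes above, using the documented
+# defaults for `m` and `ef_construction`:
+#
+#     DenseVectorIndexOptions(
+#         type="int8_hnsw",
+#         m=16,
+#         ef_construction=100,
+#         rescore_vector=DenseVectorIndexOptionsRescoreVector(oversample=3.0),
+#     )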
+
+
+class EmptyObject(AttrDict[Any]):
+    """
+    For empty class assignments.
+    """
+
+    def __init__(self, **kwargs: Any):
+        super().__init__(kwargs)
+
+
+class EwmaModelSettings(AttrDict[Any]):
+    """
+    :arg alpha:
+    """
+
+    alpha: Union[float, DefaultType]
+
+    def __init__(self, *, alpha: Union[float, DefaultType] = DEFAULT, **kwargs: Any):
+        if alpha is not DEFAULT:
+            kwargs["alpha"] = alpha
+        super().__init__(kwargs)
+
+
+class ExtendedBounds(AttrDict[Any]):
+    """
+    :arg max: Maximum value for the bound.
+    :arg min: Minimum value for the bound.
+    """
+
+    max: Any
+    min: Any
+
+    def __init__(self, *, max: Any = DEFAULT, min: Any = DEFAULT, **kwargs: Any):
+        if max is not DEFAULT:
+            kwargs["max"] = max
+        if min is not DEFAULT:
+            kwargs["min"] = min
+        super().__init__(kwargs)
+
+
+class FieldAndFormat(AttrDict[Any]):
+    """
+    A reference to a field with formatting instructions on how to return
+    the value
+
+    :arg field: (required) A wildcard pattern. The request returns values
+        for field names matching this pattern.
+    :arg format: The format in which the values are returned.
+    :arg include_unmapped:
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    format: Union[str, DefaultType]
+    include_unmapped: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        format: Union[str, DefaultType] = DEFAULT,
+        include_unmapped: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if format is not DEFAULT:
+            kwargs["format"] = format
+        if include_unmapped is not DEFAULT:
+            kwargs["include_unmapped"] = include_unmapped
+        super().__init__(kwargs)
+
+
+class FieldCollapse(AttrDict[Any]):
+    """
+    :arg field: (required) The field to collapse the result set on
+    :arg inner_hits: The number of inner hits and their sort order
+    :arg max_concurrent_group_searches: The number of concurrent requests
+        allowed to retrieve the inner_hits per group
+    :arg collapse:
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    inner_hits: Union[
+        "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
+    ]
+    max_concurrent_group_searches: Union[int, DefaultType]
+    collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        inner_hits: Union[
+            "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType
+        ] = DEFAULT,
+        max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT,
+        collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if inner_hits is not DEFAULT:
+            kwargs["inner_hits"] = inner_hits
+        if max_concurrent_group_searches is not DEFAULT:
+            kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches
+        if collapse is not DEFAULT:
+            kwargs["collapse"] = collapse
+        super().__init__(kwargs)
+
+
+class FieldLookup(AttrDict[Any]):
+    """
+    :arg id: (required) `id` of the document.
+    :arg index: Index from which to retrieve the document.
+    :arg path: Name of the field.
+    :arg routing: Custom routing value.
+    """
+
+    id: Union[str, DefaultType]
+    index: Union[str, DefaultType]
+    path: Union[str, InstrumentedField, DefaultType]
+    routing: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        id: Union[str, DefaultType] = DEFAULT,
+        index: Union[str, DefaultType] = DEFAULT,
+        path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        routing: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if id is not DEFAULT:
+            kwargs["id"] = id
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if path is not DEFAULT:
+            kwargs["path"] = str(path)
+        if routing is not DEFAULT:
+            kwargs["routing"] = routing
+        super().__init__(kwargs)
+
+
+class FieldSort(AttrDict[Any]):
+    """
+    :arg missing:
+    :arg mode:
+    :arg nested:
+    :arg order:
+    :arg unmapped_type:
+    :arg numeric_type:
+    :arg format:
+    """
+
+    missing: Union[str, int, float, bool, DefaultType]
+    mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+    nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+    order: Union[Literal["asc", "desc"], DefaultType]
+    unmapped_type: Union[
+        Literal[
+            "none",
+            "geo_point",
+            "geo_shape",
+            "ip",
+            "binary",
+            "keyword",
+            "text",
+            "search_as_you_type",
+            "date",
+            "date_nanos",
+            "boolean",
+            "completion",
+            "nested",
+            "object",
+            "passthrough",
+            "version",
+            "murmur3",
+            "token_count",
+            "percolator",
+            "integer",
+            "long",
+            "short",
+            "byte",
+            "float",
+            "half_float",
+            "scaled_float",
+            "double",
+            "integer_range",
+            "float_range",
+            "long_range",
+            "double_range",
+            "date_range",
+            "ip_range",
+            "alias",
+            "join",
+            "rank_feature",
+            "rank_features",
+            "flattened",
+            "shape",
+            "histogram",
+            "constant_keyword",
+            "counted_keyword",
+            "aggregate_metric_double",
+            "dense_vector",
+            "semantic_text",
+            "sparse_vector",
+            "match_only_text",
+            "icu_collation_keyword",
+        ],
+        DefaultType,
+    ]
+    numeric_type: Union[Literal["long", "double", "date", "date_nanos"], DefaultType]
+    format: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
+        mode: Union[
+            Literal["min", "max", "sum", "avg", "median"], DefaultType
+        ] = DEFAULT,
+        nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+        order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+        unmapped_type: Union[
+            Literal[
+                "none",
+                "geo_point",
+                "geo_shape",
+                "ip",
+                "binary",
+                "keyword",
+                "text",
+                "search_as_you_type",
+                "date",
+                "date_nanos",
+                "boolean",
+                "completion",
+                "nested",
+                "object",
+                "passthrough",
+                "version",
+                "murmur3",
+                "token_count",
+                "percolator",
+                "integer",
+                "long",
+                "short",
+                "byte",
+                "float",
+                "half_float",
+                "scaled_float",
+                "double",
+                "integer_range",
+                "float_range",
+                "long_range",
+                "double_range",
+                "date_range",
+                "ip_range",
+                "alias",
+                "join",
+                "rank_feature",
+                "rank_features",
+                "flattened",
+                "shape",
+                "histogram",
+                "constant_keyword",
+                "counted_keyword",
+                "aggregate_metric_double",
+                "dense_vector",
+                "semantic_text",
+                "sparse_vector",
+                "match_only_text",
+                "icu_collation_keyword",
+            ],
+            DefaultType,
+        ] = DEFAULT,
+        numeric_type: Union[
+            Literal["long", "double", "date", "date_nanos"], DefaultType
+        ] = DEFAULT,
+        format: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if missing is not DEFAULT:
+            kwargs["missing"] = missing
+        if mode is not DEFAULT:
+            kwargs["mode"] = mode
+        if nested is not DEFAULT:
+            kwargs["nested"] = nested
+        if order is not DEFAULT:
+            kwargs["order"] = order
+        if unmapped_type is not DEFAULT:
+            kwargs["unmapped_type"] = unmapped_type
+        if numeric_type is not DEFAULT:
+            kwargs["numeric_type"] = numeric_type
+        if format is not DEFAULT:
+            kwargs["format"] = format
+        super().__init__(kwargs)
+
+
+class FielddataFrequencyFilter(AttrDict[Any]):
+    """
+    :arg max: (required)
+    :arg min: (required)
+    :arg min_segment_size: (required)
+    """
+
+    max: Union[float, DefaultType]
+    min: Union[float, DefaultType]
+    min_segment_size: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        max: Union[float, DefaultType] = DEFAULT,
+        min: Union[float, DefaultType] = DEFAULT,
+        min_segment_size: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if max is not DEFAULT:
+            kwargs["max"] = max
+        if min is not DEFAULT:
+            kwargs["min"] = min
+        if min_segment_size is not DEFAULT:
+            kwargs["min_segment_size"] = min_segment_size
+        super().__init__(kwargs)
+
+
+class FrequentItemSetsField(AttrDict[Any]):
+    """
+    :arg field: (required)
+    :arg exclude: Values to exclude. Can be regular expression strings or
+        arrays of strings of exact terms.
+    :arg include: Values to include. Can be regular expression strings or
+        arrays of strings of exact terms.
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    exclude: Union[str, Sequence[str], DefaultType]
+    include: Union[str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        exclude: Union[str, Sequence[str], DefaultType] = DEFAULT,
+        include: Union[
+            str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if exclude is not DEFAULT:
+            kwargs["exclude"] = exclude
+        if include is not DEFAULT:
+            kwargs["include"] = include
+        super().__init__(kwargs)
+
+
+class FuzzyQuery(AttrDict[Any]):
+    """
+    :arg value: (required) Term you wish to find in the provided field.
+    :arg max_expansions: Maximum number of variations created. Defaults to
+        `50` if omitted.
+    :arg prefix_length: Number of beginning characters left unchanged when
+        creating expansions.
+    :arg rewrite: Method used to rewrite the query. Defaults to
+        `constant_score` if omitted.
+    :arg transpositions: Indicates whether edits include transpositions of
+        two adjacent characters (for example `ab` to `ba`). Defaults to
+        `True` if omitted.
+    :arg fuzziness: Maximum edit distance allowed for matching.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    value: Union[str, float, bool, DefaultType]
+    max_expansions: Union[int, DefaultType]
+    prefix_length: Union[int, DefaultType]
+    rewrite: Union[str, DefaultType]
+    transpositions: Union[bool, DefaultType]
+    fuzziness: Union[str, int, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        value: Union[str, float, bool, DefaultType] = DEFAULT,
+        max_expansions: Union[int, DefaultType] = DEFAULT,
+        prefix_length: Union[int, DefaultType] = DEFAULT,
+        rewrite: Union[str, DefaultType] = DEFAULT,
+        transpositions: Union[bool, DefaultType] = DEFAULT,
+        fuzziness: Union[str, int, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if max_expansions is not DEFAULT:
+            kwargs["max_expansions"] = max_expansions
+        if prefix_length is not DEFAULT:
+            kwargs["prefix_length"] = prefix_length
+        if rewrite is not DEFAULT:
+            kwargs["rewrite"] = rewrite
+        if transpositions is not DEFAULT:
+            kwargs["transpositions"] = transpositions
+        if fuzziness is not DEFAULT:
+            kwargs["fuzziness"] = fuzziness
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class GeoDistanceSort(AttrDict[Any]):
+    """
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg mode:
+    :arg distance_type:
+    :arg ignore_unmapped:
+    :arg order:
+    :arg unit:
+    :arg nested:
+    """
+
+    _field: Union[str, "InstrumentedField", "DefaultType"]
+    _value: Union[
+        Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
+        Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]],
+        Dict[str, Any],
+        "DefaultType",
+    ]
+    mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+    distance_type: Union[Literal["arc", "plane"], DefaultType]
+    ignore_unmapped: Union[bool, DefaultType]
+    order: Union[Literal["asc", "desc"], DefaultType]
+    unit: Union[
+        Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+    ]
+    nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union[
+            Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str],
+            Sequence[
+                Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+            ],
+            Dict[str, Any],
+            "DefaultType",
+        ] = DEFAULT,
+        *,
+        mode: Union[
+            Literal["min", "max", "sum", "avg", "median"], DefaultType
+        ] = DEFAULT,
+        distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT,
+        ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
+        order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+        unit: Union[
+            Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType
+        ] = DEFAULT,
+        nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        if mode is not DEFAULT:
+            kwargs["mode"] = mode
+        if distance_type is not DEFAULT:
+            kwargs["distance_type"] = distance_type
+        if ignore_unmapped is not DEFAULT:
+            kwargs["ignore_unmapped"] = ignore_unmapped
+        if order is not DEFAULT:
+            kwargs["order"] = order
+        if unit is not DEFAULT:
+            kwargs["unit"] = unit
+        if nested is not DEFAULT:
+            kwargs["nested"] = nested
+        super().__init__(kwargs)
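+
+# The first two positional arguments follow the `_field`/`_value` convention
+# used by several generated types: the field name becomes the key and the
+# value becomes its body. A sketch with a hypothetical geo_point field:
+#
+#     GeoDistanceSort(
+#         "pin.location",
+#         {"lat": 40.7, "lon": -74.0},
+#         order="asc",
+#         unit="km",
+#     )
+#     # -> {"pin.location": {"lat": 40.7, "lon": -74.0},
+#     #     "order": "asc", "unit": "km"}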
+
+
+class GeoGridQuery(AttrDict[Any]):
+    """
+    :arg geotile:
+    :arg geohash:
+    :arg geohex:
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    geotile: Union[str, DefaultType]
+    geohash: Union[str, DefaultType]
+    geohex: Union[str, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        geotile: Union[str, DefaultType] = DEFAULT,
+        geohash: Union[str, DefaultType] = DEFAULT,
+        geohex: Union[str, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if geotile is not DEFAULT:
+            kwargs["geotile"] = geotile
+        if geohash is not DEFAULT:
+            kwargs["geohash"] = geohash
+        if geohex is not DEFAULT:
+            kwargs["geohex"] = geohex
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class GeoHashLocation(AttrDict[Any]):
+    """
+    :arg geohash: (required)
+    """
+
+    geohash: Union[str, DefaultType]
+
+    def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
+        if geohash is not DEFAULT:
+            kwargs["geohash"] = geohash
+        super().__init__(kwargs)
+
+
+class GeoLinePoint(AttrDict[Any]):
+    """
+    :arg field: (required) The name of the geo_point field.
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        super().__init__(kwargs)
+
+
+class GeoLineSort(AttrDict[Any]):
+    """
+    :arg field: (required) The name of the numeric field to use as the
+        sort key for ordering the points.
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        super().__init__(kwargs)
+
+
+class GeoPolygonPoints(AttrDict[Any]):
+    """
+    :arg points: (required)
+    """
+
+    points: Union[
+        Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]],
+        Dict[str, Any],
+        DefaultType,
+    ]
+
+    def __init__(
+        self,
+        *,
+        points: Union[
+            Sequence[
+                Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+            ],
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if points is not DEFAULT:
+            kwargs["points"] = points
+        super().__init__(kwargs)
+
+
+class GeoShapeFieldQuery(AttrDict[Any]):
+    """
+    :arg shape:
+    :arg indexed_shape: Query using an indexed shape retrieved from the
+        specified document and path.
+    :arg relation: Spatial relation operator used to search a geo field.
+        Defaults to `intersects` if omitted.
+    """
+
+    shape: Any
+    indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType]
+    relation: Union[
+        Literal["intersects", "disjoint", "within", "contains"], DefaultType
+    ]
+
+    def __init__(
+        self,
+        *,
+        shape: Any = DEFAULT,
+        indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT,
+        relation: Union[
+            Literal["intersects", "disjoint", "within", "contains"], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if shape is not DEFAULT:
+            kwargs["shape"] = shape
+        if indexed_shape is not DEFAULT:
+            kwargs["indexed_shape"] = indexed_shape
+        if relation is not DEFAULT:
+            kwargs["relation"] = relation
+        super().__init__(kwargs)
+
+
+class GoogleNormalizedDistanceHeuristic(AttrDict[Any]):
+    """
+    :arg background_is_superset: Set to `false` if you defined a custom
+        background filter that represents a different set of documents
+        that you want to compare to.
+    """
+
+    background_is_superset: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        background_is_superset: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if background_is_superset is not DEFAULT:
+            kwargs["background_is_superset"] = background_is_superset
+        super().__init__(kwargs)
+
+
+class HdrMethod(AttrDict[Any]):
+    """
+    :arg number_of_significant_value_digits: Specifies the resolution of
+        values for the histogram in number of significant digits.
+    """
+
+    number_of_significant_value_digits: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        number_of_significant_value_digits: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if number_of_significant_value_digits is not DEFAULT:
+            kwargs["number_of_significant_value_digits"] = (
+                number_of_significant_value_digits
+            )
+        super().__init__(kwargs)
+
+
+class Highlight(AttrDict[Any]):
+    """
+    :arg fields: (required)
+    :arg encoder:
+    :arg type:
+    :arg boundary_chars: A string that contains each boundary character.
+        Defaults to `.,!? \t\n` if omitted.
+    :arg boundary_max_scan: How far to scan for boundary characters.
+        Defaults to `20` if omitted.
+    :arg boundary_scanner: Specifies how to break the highlighted
+        fragments: chars, sentence, or word. Only valid for the unified
+        and fvh highlighters. Defaults to `sentence` for the `unified`
+        highlighter. Defaults to `chars` for the `fvh` highlighter.
+    :arg boundary_scanner_locale: Controls which locale is used to search
+        for sentence and word boundaries. This parameter takes a form of a
+        language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
+        Defaults to `Locale.ROOT` if omitted.
+    :arg force_source:
+    :arg fragmenter: Specifies how text should be broken up in highlight
+        snippets: `simple` or `span`. Only valid for the `plain`
+        highlighter. Defaults to `span` if omitted.
+    :arg fragment_size: The size of the highlighted fragment in
+        characters. Defaults to `100` if omitted.
+    :arg highlight_filter:
+    :arg highlight_query: Highlight matches for a query other than the
+        search query. This is especially useful if you use a rescore query
+        because those are not taken into account by highlighting by
+        default.
+    :arg max_fragment_length:
+    :arg max_analyzed_offset: If set to a non-negative value, highlighting
+        stops at this defined maximum limit. The rest of the text is not
+        processed, thus not highlighted and no error is returned. The
+        `max_analyzed_offset` query setting does not override the
+        `index.highlight.max_analyzed_offset` setting, which prevails when
+        it’s set to a lower value than the query setting.
+    :arg no_match_size: The amount of text you want to return from the
+        beginning of the field if there are no matching fragments to
+        highlight.
+    :arg number_of_fragments: The maximum number of fragments to return.
+        If the number of fragments is set to `0`, no fragments are
+        returned. Instead, the entire field contents are highlighted and
+        returned. This can be handy when you need to highlight short texts
+        such as a title or address, but fragmentation is not required. If
+        `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
+        to `5` if omitted.
+    :arg options:
+    :arg order: Sorts highlighted fragments by score when set to `score`.
+        By default, fragments will be output in the order they appear in
+        the field (order: `none`). Setting this option to `score` will
+        output the most relevant fragments first. Each highlighter applies
+        its own logic to compute relevancy scores. Defaults to `none` if
+        omitted.
+    :arg phrase_limit: Controls the number of matching phrases in a
+        document that are considered. Prevents the `fvh` highlighter from
+        analyzing too many phrases and consuming too much memory. When
+        using `matched_fields`, `phrase_limit` phrases per matched field
+        are considered. Raising the limit increases query time and
+        consumes more memory. Only supported by the `fvh` highlighter.
+        Defaults to `256` if omitted.
+    :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg require_field_match: By default, only fields that contain a
+        query match are highlighted. Set to `false` to highlight all
+        fields. Defaults to `True` if omitted.
+    :arg tags_schema: Set to `styled` to use the built-in tag schema.
+    """
+
+    fields: Union[
+        Mapping[Union[str, InstrumentedField], "HighlightField"],
+        Sequence[Mapping[Union[str, InstrumentedField], "HighlightField"]],
+        Dict[str, Any],
+        DefaultType,
+    ]
+    encoder: Union[Literal["default", "html"], DefaultType]
+    type: Union[Literal["plain", "fvh", "unified"], DefaultType]
+    boundary_chars: Union[str, DefaultType]
+    boundary_max_scan: Union[int, DefaultType]
+    boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
+    boundary_scanner_locale: Union[str, DefaultType]
+    force_source: Union[bool, DefaultType]
+    fragmenter: Union[Literal["simple", "span"], DefaultType]
+    fragment_size: Union[int, DefaultType]
+    highlight_filter: Union[bool, DefaultType]
+    highlight_query: Union[Query, DefaultType]
+    max_fragment_length: Union[int, DefaultType]
+    max_analyzed_offset: Union[int, DefaultType]
+    no_match_size: Union[int, DefaultType]
+    number_of_fragments: Union[int, DefaultType]
+    options: Union[Mapping[str, Any], DefaultType]
+    order: Union[Literal["score"], DefaultType]
+    phrase_limit: Union[int, DefaultType]
+    post_tags: Union[Sequence[str], DefaultType]
+    pre_tags: Union[Sequence[str], DefaultType]
+    require_field_match: Union[bool, DefaultType]
+    tags_schema: Union[Literal["styled"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        fields: Union[
+            Mapping[Union[str, InstrumentedField], "HighlightField"],
+            Sequence[Mapping[Union[str, InstrumentedField], "HighlightField"]],
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        encoder: Union[Literal["default", "html"], DefaultType] = DEFAULT,
+        type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
+        boundary_chars: Union[str, DefaultType] = DEFAULT,
+        boundary_max_scan: Union[int, DefaultType] = DEFAULT,
+        boundary_scanner: Union[
+            Literal["chars", "sentence", "word"], DefaultType
+        ] = DEFAULT,
+        boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
+        force_source: Union[bool, DefaultType] = DEFAULT,
+        fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
+        fragment_size: Union[int, DefaultType] = DEFAULT,
+        highlight_filter: Union[bool, DefaultType] = DEFAULT,
+        highlight_query: Union[Query, DefaultType] = DEFAULT,
+        max_fragment_length: Union[int, DefaultType] = DEFAULT,
+        max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
+        no_match_size: Union[int, DefaultType] = DEFAULT,
+        number_of_fragments: Union[int, DefaultType] = DEFAULT,
+        options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+        order: Union[Literal["score"], DefaultType] = DEFAULT,
+        phrase_limit: Union[int, DefaultType] = DEFAULT,
+        post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+        pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+        require_field_match: Union[bool, DefaultType] = DEFAULT,
+        tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if fields is not DEFAULT:
+            kwargs["fields"] = str(fields)
+        if encoder is not DEFAULT:
+            kwargs["encoder"] = encoder
+        if type is not DEFAULT:
+            kwargs["type"] = type
+        if boundary_chars is not DEFAULT:
+            kwargs["boundary_chars"] = boundary_chars
+        if boundary_max_scan is not DEFAULT:
+            kwargs["boundary_max_scan"] = boundary_max_scan
+        if boundary_scanner is not DEFAULT:
+            kwargs["boundary_scanner"] = boundary_scanner
+        if boundary_scanner_locale is not DEFAULT:
+            kwargs["boundary_scanner_locale"] = boundary_scanner_locale
+        if force_source is not DEFAULT:
+            kwargs["force_source"] = force_source
+        if fragmenter is not DEFAULT:
+            kwargs["fragmenter"] = fragmenter
+        if fragment_size is not DEFAULT:
+            kwargs["fragment_size"] = fragment_size
+        if highlight_filter is not DEFAULT:
+            kwargs["highlight_filter"] = highlight_filter
+        if highlight_query is not DEFAULT:
+            kwargs["highlight_query"] = highlight_query
+        if max_fragment_length is not DEFAULT:
+            kwargs["max_fragment_length"] = max_fragment_length
+        if max_analyzed_offset is not DEFAULT:
+            kwargs["max_analyzed_offset"] = max_analyzed_offset
+        if no_match_size is not DEFAULT:
+            kwargs["no_match_size"] = no_match_size
+        if number_of_fragments is not DEFAULT:
+            kwargs["number_of_fragments"] = number_of_fragments
+        if options is not DEFAULT:
+            kwargs["options"] = options
+        if order is not DEFAULT:
+            kwargs["order"] = order
+        if phrase_limit is not DEFAULT:
+            kwargs["phrase_limit"] = phrase_limit
+        if post_tags is not DEFAULT:
+            kwargs["post_tags"] = post_tags
+        if pre_tags is not DEFAULT:
+            kwargs["pre_tags"] = pre_tags
+        if require_field_match is not DEFAULT:
+            kwargs["require_field_match"] = require_field_match
+        if tags_schema is not DEFAULT:
+            kwargs["tags_schema"] = tags_schema
+        super().__init__(kwargs)
+
+
+class HighlightField(AttrDict[Any]):
+    """
+    :arg fragment_offset:
+    :arg matched_fields:
+    :arg type:
+    :arg boundary_chars: A string that contains each boundary character.
+        Defaults to `.,!? \t\n` if omitted.
+    :arg boundary_max_scan: How far to scan for boundary characters.
+        Defaults to `20` if omitted.
+    :arg boundary_scanner: Specifies how to break the highlighted
+        fragments: chars, sentence, or word. Only valid for the unified
+        and fvh highlighters. Defaults to `sentence` for the `unified`
+        highlighter. Defaults to `chars` for the `fvh` highlighter.
+    :arg boundary_scanner_locale: Controls which locale is used to search
+        for sentence and word boundaries. This parameter takes a form of a
+        language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`.
+        Defaults to `Locale.ROOT` if omitted.
+    :arg force_source:
+    :arg fragmenter: Specifies how text should be broken up in highlight
+        snippets: `simple` or `span`. Only valid for the `plain`
+        highlighter. Defaults to `span` if omitted.
+    :arg fragment_size: The size of the highlighted fragment in
+        characters. Defaults to `100` if omitted.
+    :arg highlight_filter:
+    :arg highlight_query: Highlight matches for a query other than the
+        search query. This is especially useful if you use a rescore query
+        because those are not taken into account by highlighting by
+        default.
+    :arg max_fragment_length:
+    :arg max_analyzed_offset: If set to a non-negative value, highlighting
+        stops at this defined maximum limit. The rest of the text is not
+        processed, thus not highlighted and no error is returned. The
+        `max_analyzed_offset` query setting does not override the
+        `index.highlight.max_analyzed_offset` setting, which prevails when
+        it’s set to a lower value than the query setting.
+    :arg no_match_size: The amount of text you want to return from the
+        beginning of the field if there are no matching fragments to
+        highlight.
+    :arg number_of_fragments: The maximum number of fragments to return.
+        If the number of fragments is set to `0`, no fragments are
+        returned. Instead, the entire field contents are highlighted and
+        returned. This can be handy when you need to highlight short texts
+        such as a title or address, but fragmentation is not required. If
+        `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
+        to `5` if omitted.
+    :arg options:
+    :arg order: Sorts highlighted fragments by score when set to `score`.
+        By default, fragments will be output in the order they appear in
+        the field (order: `none`). Setting this option to `score` will
+        output the most relevant fragments first. Each highlighter applies
+        its own logic to compute relevancy scores. Defaults to `none` if
+        omitted.
+    :arg phrase_limit: Controls the number of matching phrases in a
+        document that are considered. Prevents the `fvh` highlighter from
+        analyzing too many phrases and consuming too much memory. When
+        using `matched_fields`, `phrase_limit` phrases per matched field
+        are considered. Raising the limit increases query time and
+        consumes more memory. Only supported by the `fvh` highlighter.
+        Defaults to `256` if omitted.
+    :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg require_field_match: By default, only fields that contain a
+        query match are highlighted. Set to `false` to highlight all
+        fields. Defaults to `True` if omitted.
+    :arg tags_schema: Set to `styled` to use the built-in tag schema.
+    """
+
+    fragment_offset: Union[int, DefaultType]
+    matched_fields: Union[
+        Union[str, InstrumentedField],
+        Sequence[Union[str, InstrumentedField]],
+        DefaultType,
+    ]
+    type: Union[Literal["plain", "fvh", "unified"], DefaultType]
+    boundary_chars: Union[str, DefaultType]
+    boundary_max_scan: Union[int, DefaultType]
+    boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
+    boundary_scanner_locale: Union[str, DefaultType]
+    force_source: Union[bool, DefaultType]
+    fragmenter: Union[Literal["simple", "span"], DefaultType]
+    fragment_size: Union[int, DefaultType]
+    highlight_filter: Union[bool, DefaultType]
+    highlight_query: Union[Query, DefaultType]
+    max_fragment_length: Union[int, DefaultType]
+    max_analyzed_offset: Union[int, DefaultType]
+    no_match_size: Union[int, DefaultType]
+    number_of_fragments: Union[int, DefaultType]
+    options: Union[Mapping[str, Any], DefaultType]
+    order: Union[Literal["score"], DefaultType]
+    phrase_limit: Union[int, DefaultType]
+    post_tags: Union[Sequence[str], DefaultType]
+    pre_tags: Union[Sequence[str], DefaultType]
+    require_field_match: Union[bool, DefaultType]
+    tags_schema: Union[Literal["styled"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        fragment_offset: Union[int, DefaultType] = DEFAULT,
+        matched_fields: Union[
+            Union[str, InstrumentedField],
+            Sequence[Union[str, InstrumentedField]],
+            DefaultType,
+        ] = DEFAULT,
+        type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
+        boundary_chars: Union[str, DefaultType] = DEFAULT,
+        boundary_max_scan: Union[int, DefaultType] = DEFAULT,
+        boundary_scanner: Union[
+            Literal["chars", "sentence", "word"], DefaultType
+        ] = DEFAULT,
+        boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
+        force_source: Union[bool, DefaultType] = DEFAULT,
+        fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
+        fragment_size: Union[int, DefaultType] = DEFAULT,
+        highlight_filter: Union[bool, DefaultType] = DEFAULT,
+        highlight_query: Union[Query, DefaultType] = DEFAULT,
+        max_fragment_length: Union[int, DefaultType] = DEFAULT,
+        max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
+        no_match_size: Union[int, DefaultType] = DEFAULT,
+        number_of_fragments: Union[int, DefaultType] = DEFAULT,
+        options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+        order: Union[Literal["score"], DefaultType] = DEFAULT,
+        phrase_limit: Union[int, DefaultType] = DEFAULT,
+        post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+        pre_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+        require_field_match: Union[bool, DefaultType] = DEFAULT,
+        tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if fragment_offset is not DEFAULT:
+            kwargs["fragment_offset"] = fragment_offset
+        if matched_fields is not DEFAULT:
+            kwargs["matched_fields"] = str(matched_fields)
+        if type is not DEFAULT:
+            kwargs["type"] = type
+        if boundary_chars is not DEFAULT:
+            kwargs["boundary_chars"] = boundary_chars
+        if boundary_max_scan is not DEFAULT:
+            kwargs["boundary_max_scan"] = boundary_max_scan
+        if boundary_scanner is not DEFAULT:
+            kwargs["boundary_scanner"] = boundary_scanner
+        if boundary_scanner_locale is not DEFAULT:
+            kwargs["boundary_scanner_locale"] = boundary_scanner_locale
+        if force_source is not DEFAULT:
+            kwargs["force_source"] = force_source
+        if fragmenter is not DEFAULT:
+            kwargs["fragmenter"] = fragmenter
+        if fragment_size is not DEFAULT:
+            kwargs["fragment_size"] = fragment_size
+        if highlight_filter is not DEFAULT:
+            kwargs["highlight_filter"] = highlight_filter
+        if highlight_query is not DEFAULT:
+            kwargs["highlight_query"] = highlight_query
+        if max_fragment_length is not DEFAULT:
+            kwargs["max_fragment_length"] = max_fragment_length
+        if max_analyzed_offset is not DEFAULT:
+            kwargs["max_analyzed_offset"] = max_analyzed_offset
+        if no_match_size is not DEFAULT:
+            kwargs["no_match_size"] = no_match_size
+        if number_of_fragments is not DEFAULT:
+            kwargs["number_of_fragments"] = number_of_fragments
+        if options is not DEFAULT:
+            kwargs["options"] = options
+        if order is not DEFAULT:
+            kwargs["order"] = order
+        if phrase_limit is not DEFAULT:
+            kwargs["phrase_limit"] = phrase_limit
+        if post_tags is not DEFAULT:
+            kwargs["post_tags"] = post_tags
+        if pre_tags is not DEFAULT:
+            kwargs["pre_tags"] = pre_tags
+        if require_field_match is not DEFAULT:
+            kwargs["require_field_match"] = require_field_match
+        if tags_schema is not DEFAULT:
+            kwargs["tags_schema"] = tags_schema
+        super().__init__(kwargs)
+
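+
+# NOTE: Illustrative sketch, not part of the generated module. It shows how
+# the ``Highlight`` and ``HighlightField`` classes defined above can be
+# combined; the field name and tag values below are hypothetical.
+def _example_highlight() -> Highlight:
+    # Highlight the "title" field with custom tags and short fragments.
+    return Highlight(
+        fields={"title": HighlightField(fragment_size=50, number_of_fragments=3)},
+        pre_tags=["<mark>"],
+        post_tags=["</mark>"],
+        require_field_match=False,
+    )
+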
+
+class HoltLinearModelSettings(AttrDict[Any]):
+    """
+    :arg alpha:
+    :arg beta:
+    """
+
+    alpha: Union[float, DefaultType]
+    beta: Union[float, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        alpha: Union[float, DefaultType] = DEFAULT,
+        beta: Union[float, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if alpha is not DEFAULT:
+            kwargs["alpha"] = alpha
+        if beta is not DEFAULT:
+            kwargs["beta"] = beta
+        super().__init__(kwargs)
+
+
+class HoltWintersModelSettings(AttrDict[Any]):
+    """
+    :arg alpha:
+    :arg beta:
+    :arg gamma:
+    :arg pad:
+    :arg period:
+    :arg type:
+    """
+
+    alpha: Union[float, DefaultType]
+    beta: Union[float, DefaultType]
+    gamma: Union[float, DefaultType]
+    pad: Union[bool, DefaultType]
+    period: Union[int, DefaultType]
+    type: Union[Literal["add", "mult"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        alpha: Union[float, DefaultType] = DEFAULT,
+        beta: Union[float, DefaultType] = DEFAULT,
+        gamma: Union[float, DefaultType] = DEFAULT,
+        pad: Union[bool, DefaultType] = DEFAULT,
+        period: Union[int, DefaultType] = DEFAULT,
+        type: Union[Literal["add", "mult"], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if alpha is not DEFAULT:
+            kwargs["alpha"] = alpha
+        if beta is not DEFAULT:
+            kwargs["beta"] = beta
+        if gamma is not DEFAULT:
+            kwargs["gamma"] = gamma
+        if pad is not DEFAULT:
+            kwargs["pad"] = pad
+        if period is not DEFAULT:
+            kwargs["period"] = period
+        if type is not DEFAULT:
+            kwargs["type"] = type
+        super().__init__(kwargs)
+
+
+class InferenceConfigContainer(AttrDict[Any]):
+    """
+    :arg regression: Regression configuration for inference.
+    :arg classification: Classification configuration for inference.
+    """
+
+    regression: Union["RegressionInferenceOptions", Dict[str, Any], DefaultType]
+    classification: Union["ClassificationInferenceOptions", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        regression: Union[
+            "RegressionInferenceOptions", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        classification: Union[
+            "ClassificationInferenceOptions", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if regression is not DEFAULT:
+            kwargs["regression"] = regression
+        if classification is not DEFAULT:
+            kwargs["classification"] = classification
+        super().__init__(kwargs)
+
+
+class InnerHits(AttrDict[Any]):
+    """
+    :arg name: The name for the particular inner hit definition in the
+        response. Useful when a search request contains multiple inner
+        hits.
+    :arg size: The maximum number of hits to return per `inner_hits`.
+        Defaults to `3` if omitted.
+    :arg from: Inner hit starting document offset.
+    :arg collapse:
+    :arg docvalue_fields:
+    :arg explain:
+    :arg highlight:
+    :arg ignore_unmapped:
+    :arg script_fields:
+    :arg seq_no_primary_term:
+    :arg fields:
+    :arg sort: How the inner hits should be sorted per `inner_hits`. By
+        default, inner hits are sorted by score.
+    :arg _source:
+    :arg stored_fields:
+    :arg track_scores:
+    :arg version:
+    """
+
+    name: Union[str, DefaultType]
+    size: Union[int, DefaultType]
+    from_: Union[int, DefaultType]
+    collapse: Union["FieldCollapse", Dict[str, Any], DefaultType]
+    docvalue_fields: Union[
+        Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType
+    ]
+    explain: Union[bool, DefaultType]
+    highlight: Union["Highlight", Dict[str, Any], DefaultType]
+    ignore_unmapped: Union[bool, DefaultType]
+    script_fields: Union[
+        Mapping[Union[str, InstrumentedField], "ScriptField"],
+        Dict[str, Any],
+        DefaultType,
+    ]
+    seq_no_primary_term: Union[bool, DefaultType]
+    fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType]
+    sort: Union[
+        Union[Union[str, InstrumentedField], "SortOptions"],
+        Sequence[Union[Union[str, InstrumentedField], "SortOptions"]],
+        Dict[str, Any],
+        DefaultType,
+    ]
+    _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType]
+    stored_fields: Union[
+        Union[str, InstrumentedField],
+        Sequence[Union[str, InstrumentedField]],
+        DefaultType,
+    ]
+    track_scores: Union[bool, DefaultType]
+    version: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        name: Union[str, DefaultType] = DEFAULT,
+        size: Union[int, DefaultType] = DEFAULT,
+        from_: Union[int, DefaultType] = DEFAULT,
+        collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT,
+        docvalue_fields: Union[
+            Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType
+        ] = DEFAULT,
+        explain: Union[bool, DefaultType] = DEFAULT,
+        highlight: Union["Highlight", Dict[str, Any], DefaultType] = DEFAULT,
+        ignore_unmapped: Union[bool, DefaultType] = DEFAULT,
+        script_fields: Union[
+            Mapping[Union[str, InstrumentedField], "ScriptField"],
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        seq_no_primary_term: Union[bool, DefaultType] = DEFAULT,
+        fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT,
+        sort: Union[
+            Union[Union[str, InstrumentedField], "SortOptions"],
+            Sequence[Union[Union[str, InstrumentedField], "SortOptions"]],
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] = DEFAULT,
+        stored_fields: Union[
+            Union[str, InstrumentedField],
+            Sequence[Union[str, InstrumentedField]],
+            DefaultType,
+        ] = DEFAULT,
+        track_scores: Union[bool, DefaultType] = DEFAULT,
+        version: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if name is not DEFAULT:
+            kwargs["name"] = name
+        if size is not DEFAULT:
+            kwargs["size"] = size
+        if from_ is not DEFAULT:
+            kwargs["from_"] = from_
+        if collapse is not DEFAULT:
+            kwargs["collapse"] = collapse
+        if docvalue_fields is not DEFAULT:
+            kwargs["docvalue_fields"] = docvalue_fields
+        if explain is not DEFAULT:
+            kwargs["explain"] = explain
+        if highlight is not DEFAULT:
+            kwargs["highlight"] = highlight
+        if ignore_unmapped is not DEFAULT:
+            kwargs["ignore_unmapped"] = ignore_unmapped
+        if script_fields is not DEFAULT:
+            kwargs["script_fields"] = str(script_fields)
+        if seq_no_primary_term is not DEFAULT:
+            kwargs["seq_no_primary_term"] = seq_no_primary_term
+        if fields is not DEFAULT:
+            kwargs["fields"] = str(fields)
+        if sort is not DEFAULT:
+            kwargs["sort"] = str(sort)
+        if _source is not DEFAULT:
+            kwargs["_source"] = _source
+        if stored_fields is not DEFAULT:
+            kwargs["stored_fields"] = str(stored_fields)
+        if track_scores is not DEFAULT:
+            kwargs["track_scores"] = track_scores
+        if version is not DEFAULT:
+            kwargs["version"] = version
+        super().__init__(kwargs)
+
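+
+# NOTE: Illustrative sketch, not part of the generated module. A minimal
+# ``InnerHits`` configuration; the hit name and sort field are hypothetical.
+def _example_inner_hits() -> InnerHits:
+    # Return the three most recent inner hits without their _source payload.
+    return InnerHits(
+        name="latest_comments",
+        size=3,
+        sort="created_at",
+        _source=False,
+    )
+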
+
+class IntervalsAllOf(AttrDict[Any]):
+    """
+    :arg intervals: (required) An array of rules to combine. All rules
+        must produce a match in a document for the overall source to
+        match.
+    :arg max_gaps: Maximum number of positions between the matching terms.
+        Intervals produced by the rules further apart than this are not
+        considered matches. Defaults to `-1` if omitted.
+    :arg ordered: If `true`, intervals produced by the rules should appear
+        in the order in which they are specified.
+    :arg filter: Rule used to filter returned intervals.
+    """
+
+    intervals: Union[
+        Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+    ]
+    max_gaps: Union[int, DefaultType]
+    ordered: Union[bool, DefaultType]
+    filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        intervals: Union[
+            Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+        ] = DEFAULT,
+        max_gaps: Union[int, DefaultType] = DEFAULT,
+        ordered: Union[bool, DefaultType] = DEFAULT,
+        filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if intervals is not DEFAULT:
+            kwargs["intervals"] = intervals
+        if max_gaps is not DEFAULT:
+            kwargs["max_gaps"] = max_gaps
+        if ordered is not DEFAULT:
+            kwargs["ordered"] = ordered
+        if filter is not DEFAULT:
+            kwargs["filter"] = filter
+        super().__init__(kwargs)
+
+
+class IntervalsAnyOf(AttrDict[Any]):
+    """
+    :arg intervals: (required) An array of rules to match.
+    :arg filter: Rule used to filter returned intervals.
+    """
+
+    intervals: Union[
+        Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+    ]
+    filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        intervals: Union[
+            Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType
+        ] = DEFAULT,
+        filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if intervals is not DEFAULT:
+            kwargs["intervals"] = intervals
+        if filter is not DEFAULT:
+            kwargs["filter"] = filter
+        super().__init__(kwargs)
+
+
+class IntervalsContainer(AttrDict[Any]):
+    """
+    :arg all_of: Returns matches that span a combination of other rules.
+    :arg any_of: Returns intervals produced by any of its sub-rules.
+    :arg fuzzy: Matches analyzed text.
+    :arg match: Matches analyzed text.
+    :arg prefix: Matches terms that start with a specified set of
+        characters.
+    :arg range:
+    :arg regexp:
+    :arg wildcard: Matches terms using a wildcard pattern.
+    """
+
+    all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType]
+    any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType]
+    fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType]
+    match: Union["IntervalsMatch", Dict[str, Any], DefaultType]
+    prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType]
+    range: Union["IntervalsRange", Dict[str, Any], DefaultType]
+    regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType]
+    wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT,
+        any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT,
+        fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT,
+        match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT,
+        prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT,
+        range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT,
+        regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT,
+        wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if all_of is not DEFAULT:
+            kwargs["all_of"] = all_of
+        if any_of is not DEFAULT:
+            kwargs["any_of"] = any_of
+        if fuzzy is not DEFAULT:
+            kwargs["fuzzy"] = fuzzy
+        if match is not DEFAULT:
+            kwargs["match"] = match
+        if prefix is not DEFAULT:
+            kwargs["prefix"] = prefix
+        if range is not DEFAULT:
+            kwargs["range"] = range
+        if regexp is not DEFAULT:
+            kwargs["regexp"] = regexp
+        if wildcard is not DEFAULT:
+            kwargs["wildcard"] = wildcard
+        super().__init__(kwargs)
+
+
+class IntervalsFilter(AttrDict[Any]):
+    """
+    :arg after: Query used to return intervals that follow an interval
+        from the `filter` rule.
+    :arg before: Query used to return intervals that occur before an
+        interval from the `filter` rule.
+    :arg contained_by: Query used to return intervals contained by an
+        interval from the `filter` rule.
+    :arg containing: Query used to return intervals that contain an
+        interval from the `filter` rule.
+    :arg not_contained_by: Query used to return intervals that are **not**
+        contained by an interval from the `filter` rule.
+    :arg not_containing: Query used to return intervals that do **not**
+        contain an interval from the `filter` rule.
+    :arg not_overlapping: Query used to return intervals that do **not**
+        overlap with an interval from the `filter` rule.
+    :arg overlapping: Query used to return intervals that overlap with an
+        interval from the `filter` rule.
+    :arg script: Script used to return matching documents. This script
+        must return a boolean value: `true` or `false`.
+    """
+
+    after: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    before: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    containing: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    not_contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    not_containing: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    not_overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType]
+    script: Union["Script", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        after: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+        before: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+        contained_by: Union[
+            "IntervalsContainer", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+        not_contained_by: Union[
+            "IntervalsContainer", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        not_containing: Union[
+            "IntervalsContainer", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        not_overlapping: Union[
+            "IntervalsContainer", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT,
+        script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if after is not DEFAULT:
+            kwargs["after"] = after
+        if before is not DEFAULT:
+            kwargs["before"] = before
+        if contained_by is not DEFAULT:
+            kwargs["contained_by"] = contained_by
+        if containing is not DEFAULT:
+            kwargs["containing"] = containing
+        if not_contained_by is not DEFAULT:
+            kwargs["not_contained_by"] = not_contained_by
+        if not_containing is not DEFAULT:
+            kwargs["not_containing"] = not_containing
+        if not_overlapping is not DEFAULT:
+            kwargs["not_overlapping"] = not_overlapping
+        if overlapping is not DEFAULT:
+            kwargs["overlapping"] = overlapping
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        super().__init__(kwargs)
+
+
+class IntervalsFuzzy(AttrDict[Any]):
+    """
+    :arg term: (required) The term to match.
+    :arg analyzer: Analyzer used to normalize the term.
+    :arg fuzziness: Maximum edit distance allowed for matching. Defaults
+        to `auto` if omitted.
+    :arg prefix_length: Number of beginning characters left unchanged when
+        creating expansions.
+    :arg transpositions: Indicates whether edits include transpositions of
+        two adjacent characters (for example, `ab` to `ba`). Defaults to
+        `True` if omitted.
+    :arg use_field: If specified, match intervals from this field rather
+        than the top-level field. The `term` is normalized using the
+        search analyzer from this field, unless `analyzer` is specified
+        separately.
+    """
+
+    term: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    fuzziness: Union[str, int, DefaultType]
+    prefix_length: Union[int, DefaultType]
+    transpositions: Union[bool, DefaultType]
+    use_field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        term: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        fuzziness: Union[str, int, DefaultType] = DEFAULT,
+        prefix_length: Union[int, DefaultType] = DEFAULT,
+        transpositions: Union[bool, DefaultType] = DEFAULT,
+        use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if term is not DEFAULT:
+            kwargs["term"] = term
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if fuzziness is not DEFAULT:
+            kwargs["fuzziness"] = fuzziness
+        if prefix_length is not DEFAULT:
+            kwargs["prefix_length"] = prefix_length
+        if transpositions is not DEFAULT:
+            kwargs["transpositions"] = transpositions
+        if use_field is not DEFAULT:
+            kwargs["use_field"] = str(use_field)
+        super().__init__(kwargs)
+
+
+class IntervalsMatch(AttrDict[Any]):
+    """
+    :arg query: (required) Text you wish to find in the provided field.
+    :arg analyzer: Analyzer used to analyze terms in the query.
+    :arg max_gaps: Maximum number of positions between the matching terms.
+        Terms further apart than this are not considered matches. Defaults
+        to `-1` if omitted.
+    :arg ordered: If `true`, matching terms must appear in their specified
+        order.
+    :arg use_field: If specified, match intervals from this field rather
+        than the top-level field. The `term` is normalized using the
+        search analyzer from this field, unless `analyzer` is specified
+        separately.
+    :arg filter: An optional interval filter.
+    """
+
+    query: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    max_gaps: Union[int, DefaultType]
+    ordered: Union[bool, DefaultType]
+    use_field: Union[str, InstrumentedField, DefaultType]
+    filter: Union["IntervalsFilter", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        max_gaps: Union[int, DefaultType] = DEFAULT,
+        ordered: Union[bool, DefaultType] = DEFAULT,
+        use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if max_gaps is not DEFAULT:
+            kwargs["max_gaps"] = max_gaps
+        if ordered is not DEFAULT:
+            kwargs["ordered"] = ordered
+        if use_field is not DEFAULT:
+            kwargs["use_field"] = str(use_field)
+        if filter is not DEFAULT:
+            kwargs["filter"] = filter
+        super().__init__(kwargs)
+
+
+class IntervalsPrefix(AttrDict[Any]):
+    """
+    :arg prefix: (required) Beginning characters of terms you wish to find
+        in the top-level field.
+    :arg analyzer: Analyzer used to analyze the `prefix`.
+    :arg use_field: If specified, match intervals from this field rather
+        than the top-level field. The `prefix` is normalized using the
+        search analyzer from this field, unless `analyzer` is specified
+        separately.
+    """
+
+    prefix: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    use_field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        prefix: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if prefix is not DEFAULT:
+            kwargs["prefix"] = prefix
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if use_field is not DEFAULT:
+            kwargs["use_field"] = str(use_field)
+        super().__init__(kwargs)
+
+
+class IntervalsQuery(AttrDict[Any]):
+    """
+    :arg all_of: Returns matches that span a combination of other rules.
+    :arg any_of: Returns intervals produced by any of its sub-rules.
+    :arg fuzzy: Matches terms that are similar to the provided term,
+        within an edit distance defined by `fuzziness`.
+    :arg match: Matches analyzed text.
+    :arg prefix: Matches terms that start with a specified set of
+        characters.
+    :arg range:
+    :arg regexp:
+    :arg wildcard: Matches terms using a wildcard pattern.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType]
+    any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType]
+    fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType]
+    match: Union["IntervalsMatch", Dict[str, Any], DefaultType]
+    prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType]
+    range: Union["IntervalsRange", Dict[str, Any], DefaultType]
+    regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType]
+    wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT,
+        any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT,
+        fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT,
+        match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT,
+        prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT,
+        range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT,
+        regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT,
+        wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if all_of is not DEFAULT:
+            kwargs["all_of"] = all_of
+        if any_of is not DEFAULT:
+            kwargs["any_of"] = any_of
+        if fuzzy is not DEFAULT:
+            kwargs["fuzzy"] = fuzzy
+        if match is not DEFAULT:
+            kwargs["match"] = match
+        if prefix is not DEFAULT:
+            kwargs["prefix"] = prefix
+        if range is not DEFAULT:
+            kwargs["range"] = range
+        if regexp is not DEFAULT:
+            kwargs["regexp"] = regexp
+        if wildcard is not DEFAULT:
+            kwargs["wildcard"] = wildcard
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
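+
+# NOTE: Illustrative sketch, not part of the generated module. It composes
+# the interval rule helpers defined above into an ``IntervalsQuery``; the
+# query strings and name are hypothetical.
+def _example_intervals_query() -> IntervalsQuery:
+    # Require both phrases to match, in order, with no gaps in the first one.
+    return IntervalsQuery(
+        all_of=IntervalsAllOf(
+            intervals=[
+                IntervalsContainer(
+                    match=IntervalsMatch(query="my favorite food", max_gaps=0, ordered=True)
+                ),
+                IntervalsContainer(match=IntervalsMatch(query="cold porridge")),
+            ],
+            ordered=True,
+        ),
+        _name="intervals_example",
+    )
+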
+
+class IntervalsRange(AttrDict[Any]):
+    """
+    :arg analyzer: Analyzer used to analyze the `prefix`.
+    :arg gte: Lower term; either `gte` or `gt` must be provided.
+    :arg gt: Lower term; either `gte` or `gt` must be provided.
+    :arg lte: Upper term; either `lte` or `lt` must be provided.
+    :arg lt: Upper term; either `lte` or `lt` must be provided.
+    :arg use_field: If specified, match intervals from this field rather
+        than the top-level field. The `prefix` is normalized using the
+        search analyzer from this field, unless `analyzer` is specified
+        separately.
+    """
+
+    analyzer: Union[str, DefaultType]
+    gte: Union[str, DefaultType]
+    gt: Union[str, DefaultType]
+    lte: Union[str, DefaultType]
+    lt: Union[str, DefaultType]
+    use_field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        gte: Union[str, DefaultType] = DEFAULT,
+        gt: Union[str, DefaultType] = DEFAULT,
+        lte: Union[str, DefaultType] = DEFAULT,
+        lt: Union[str, DefaultType] = DEFAULT,
+        use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if gte is not DEFAULT:
+            kwargs["gte"] = gte
+        if gt is not DEFAULT:
+            kwargs["gt"] = gt
+        if lte is not DEFAULT:
+            kwargs["lte"] = lte
+        if lt is not DEFAULT:
+            kwargs["lt"] = lt
+        if use_field is not DEFAULT:
+            kwargs["use_field"] = str(use_field)
+        super().__init__(kwargs)
+
+
+class IntervalsRegexp(AttrDict[Any]):
+    """
+    :arg pattern: (required) Regex pattern.
+    :arg analyzer: Analyzer used to analyze the `pattern`.
+    :arg use_field: If specified, match intervals from this field rather
+        than the top-level field. The `pattern` is normalized using the
+        search analyzer from this field, unless `analyzer` is specified
+        separately.
+    """
+
+    pattern: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    use_field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        pattern: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if pattern is not DEFAULT:
+            kwargs["pattern"] = pattern
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if use_field is not DEFAULT:
+            kwargs["use_field"] = str(use_field)
+        super().__init__(kwargs)
+
+
+class IntervalsWildcard(AttrDict[Any]):
+    """
+    :arg pattern: (required) Wildcard pattern used to find matching terms.
+    :arg analyzer: Analyzer used to analyze the `pattern`. Defaults to the
+        top-level field's analyzer.
+    :arg use_field: If specified, match intervals from this field rather
+        than the top-level field. The `pattern` is normalized using the
+        search analyzer from this field, unless `analyzer` is specified
+        separately.
+    """
+
+    pattern: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    use_field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        pattern: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if pattern is not DEFAULT:
+            kwargs["pattern"] = pattern
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if use_field is not DEFAULT:
+            kwargs["use_field"] = str(use_field)
+        super().__init__(kwargs)
+
+
+class IpRangeAggregationRange(AttrDict[Any]):
+    """
+    :arg from: Start of the range.
+    :arg mask: IP range defined as a CIDR mask.
+    :arg to: End of the range.
+    """
+
+    from_: Union[str, None, DefaultType]
+    mask: Union[str, DefaultType]
+    to: Union[str, None, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        from_: Union[str, None, DefaultType] = DEFAULT,
+        mask: Union[str, DefaultType] = DEFAULT,
+        to: Union[str, None, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if from_ is not DEFAULT:
+            kwargs["from_"] = from_
+        if mask is not DEFAULT:
+            kwargs["mask"] = mask
+        if to is not DEFAULT:
+            kwargs["to"] = to
+        super().__init__(kwargs)
+
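+
+# NOTE: Illustrative sketch, not part of the generated module. A range can be
+# expressed either as ``from_``/``to`` bounds or as a CIDR ``mask``; the
+# addresses below are hypothetical.
+def _example_ip_ranges() -> list:
+    return [
+        IpRangeAggregationRange(to="10.0.0.5"),
+        IpRangeAggregationRange(from_="10.0.0.5", to="10.0.0.127"),
+        IpRangeAggregationRange(mask="10.0.0.0/25"),
+    ]
+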
+
+class LatLonGeoLocation(AttrDict[Any]):
+    """
+    :arg lat: (required) Latitude
+    :arg lon: (required) Longitude
+    """
+
+    lat: Union[float, DefaultType]
+    lon: Union[float, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        lat: Union[float, DefaultType] = DEFAULT,
+        lon: Union[float, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if lat is not DEFAULT:
+            kwargs["lat"] = lat
+        if lon is not DEFAULT:
+            kwargs["lon"] = lon
+        super().__init__(kwargs)
+
+
+class LikeDocument(AttrDict[Any]):
+    """
+    :arg doc: A document not present in the index.
+    :arg fields:
+    :arg _id: ID of a document.
+    :arg _index: Index of a document.
+    :arg per_field_analyzer: Overrides the default analyzer.
+    :arg routing:
+    :arg version:
+    :arg version_type:  Defaults to `'internal'` if omitted.
+    """
+
+    doc: Any
+    fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType]
+    _id: Union[str, DefaultType]
+    _index: Union[str, DefaultType]
+    per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType]
+    routing: Union[str, DefaultType]
+    version: Union[int, DefaultType]
+    version_type: Union[Literal["internal", "external", "external_gte"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        doc: Any = DEFAULT,
+        fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT,
+        _id: Union[str, DefaultType] = DEFAULT,
+        _index: Union[str, DefaultType] = DEFAULT,
+        per_field_analyzer: Union[
+            Mapping[Union[str, InstrumentedField], str], DefaultType
+        ] = DEFAULT,
+        routing: Union[str, DefaultType] = DEFAULT,
+        version: Union[int, DefaultType] = DEFAULT,
+        version_type: Union[
+            Literal["internal", "external", "external_gte"], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if doc is not DEFAULT:
+            kwargs["doc"] = doc
+        if fields is not DEFAULT:
+            kwargs["fields"] = str(fields)
+        if _id is not DEFAULT:
+            kwargs["_id"] = _id
+        if _index is not DEFAULT:
+            kwargs["_index"] = _index
+        if per_field_analyzer is not DEFAULT:
+            kwargs["per_field_analyzer"] = str(per_field_analyzer)
+        if routing is not DEFAULT:
+            kwargs["routing"] = routing
+        if version is not DEFAULT:
+            kwargs["version"] = version
+        if version_type is not DEFAULT:
+            kwargs["version_type"] = version_type
+        super().__init__(kwargs)
+
+
+class MatchBoolPrefixQuery(AttrDict[Any]):
+    """
+    :arg query: (required) Terms you wish to find in the provided field.
+        The last term is used in a prefix query.
+    :arg analyzer: Analyzer used to convert the text in the query value
+        into tokens.
+    :arg fuzziness: Maximum edit distance allowed for matching. Can be
+        applied to the term subqueries constructed for all terms but the
+        final term.
+    :arg fuzzy_rewrite: Method used to rewrite the query. Can be applied
+        to the term subqueries constructed for all terms but the final
+        term.
+    :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+        transpositions of two adjacent characters (for example, `ab` to
+        `ba`). Can be applied to the term subqueries constructed for all
+        terms but the final term. Defaults to `True` if omitted.
+    :arg max_expansions: Maximum number of terms to which the query will
+        expand. Can be applied to the term subqueries constructed for all
+        terms but the final term. Defaults to `50` if omitted.
+    :arg minimum_should_match: Minimum number of clauses that must match
+        for a document to be returned. Applied to the constructed bool
+        query.
+    :arg operator: Boolean logic used to interpret text in the query
+        value. Applied to the constructed bool query. Defaults to `'or'`
+        if omitted.
+    :arg prefix_length: Number of beginning characters left unchanged for
+        fuzzy matching. Can be applied to the term subqueries constructed
+        for all terms but the final term.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    query: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    fuzziness: Union[str, int, DefaultType]
+    fuzzy_rewrite: Union[str, DefaultType]
+    fuzzy_transpositions: Union[bool, DefaultType]
+    max_expansions: Union[int, DefaultType]
+    minimum_should_match: Union[int, str, DefaultType]
+    operator: Union[Literal["and", "or"], DefaultType]
+    prefix_length: Union[int, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        fuzziness: Union[str, int, DefaultType] = DEFAULT,
+        fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
+        fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
+        max_expansions: Union[int, DefaultType] = DEFAULT,
+        minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+        operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+        prefix_length: Union[int, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if fuzziness is not DEFAULT:
+            kwargs["fuzziness"] = fuzziness
+        if fuzzy_rewrite is not DEFAULT:
+            kwargs["fuzzy_rewrite"] = fuzzy_rewrite
+        if fuzzy_transpositions is not DEFAULT:
+            kwargs["fuzzy_transpositions"] = fuzzy_transpositions
+        if max_expansions is not DEFAULT:
+            kwargs["max_expansions"] = max_expansions
+        if minimum_should_match is not DEFAULT:
+            kwargs["minimum_should_match"] = minimum_should_match
+        if operator is not DEFAULT:
+            kwargs["operator"] = operator
+        if prefix_length is not DEFAULT:
+            kwargs["prefix_length"] = prefix_length
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class MatchPhrasePrefixQuery(AttrDict[Any]):
+    """
+    :arg query: (required) Text you wish to find in the provided field.
+    :arg analyzer: Analyzer used to convert text in the query value into
+        tokens.
+    :arg max_expansions: Maximum number of terms to which the last
+        provided term of the query value will expand. Defaults to `50` if
+        omitted.
+    :arg slop: Maximum number of positions allowed between matching
+        tokens.
+    :arg zero_terms_query: Indicates whether no documents are returned if
+        the analyzer removes all tokens, such as when using a `stop`
+        filter. Defaults to `none` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    query: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    max_expansions: Union[int, DefaultType]
+    slop: Union[int, DefaultType]
+    zero_terms_query: Union[Literal["all", "none"], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        max_expansions: Union[int, DefaultType] = DEFAULT,
+        slop: Union[int, DefaultType] = DEFAULT,
+        zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if max_expansions is not DEFAULT:
+            kwargs["max_expansions"] = max_expansions
+        if slop is not DEFAULT:
+            kwargs["slop"] = slop
+        if zero_terms_query is not DEFAULT:
+            kwargs["zero_terms_query"] = zero_terms_query
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class MatchPhraseQuery(AttrDict[Any]):
+    """
+    :arg query: (required) Query terms that are analyzed and turned into a
+        phrase query.
+    :arg analyzer: Analyzer used to convert the text in the query value
+        into tokens.
+    :arg slop: Maximum number of positions allowed between matching
+        tokens.
+    :arg zero_terms_query: Indicates whether no documents are returned if
+        the `analyzer` removes all tokens, such as when using a `stop`
+        filter. Defaults to `'none'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    query: Union[str, DefaultType]
+    analyzer: Union[str, DefaultType]
+    slop: Union[int, DefaultType]
+    zero_terms_query: Union[Literal["all", "none"], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        slop: Union[int, DefaultType] = DEFAULT,
+        zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if slop is not DEFAULT:
+            kwargs["slop"] = slop
+        if zero_terms_query is not DEFAULT:
+            kwargs["zero_terms_query"] = zero_terms_query
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class MatchQuery(AttrDict[Any]):
+    """
+    :arg query: (required) Text, number, boolean value or date you wish to
+        find in the provided field.
+    :arg analyzer: Analyzer used to convert the text in the query value
+        into tokens.
+    :arg auto_generate_synonyms_phrase_query: If `true`, match phrase
+        queries are automatically created for multi-term synonyms.
+        Defaults to `True` if omitted.
+    :arg cutoff_frequency:
+    :arg fuzziness: Maximum edit distance allowed for matching.
+    :arg fuzzy_rewrite: Method used to rewrite the query.
+    :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include
+        transpositions of two adjacent characters (for example, `ab` to
+        `ba`). Defaults to `True` if omitted.
+    :arg lenient: If `true`, format-based errors, such as providing a text
+        query value for a numeric field, are ignored.
+    :arg max_expansions: Maximum number of terms to which the query will
+        expand. Defaults to `50` if omitted.
+    :arg minimum_should_match: Minimum number of clauses that must match
+        for a document to be returned.
+    :arg operator: Boolean logic used to interpret text in the query
+        value. Defaults to `'or'` if omitted.
+    :arg prefix_length: Number of beginning characters left unchanged for
+        fuzzy matching.
+    :arg zero_terms_query: Indicates whether no documents are returned if
+        the `analyzer` removes all tokens, such as when using a `stop`
+        filter. Defaults to `'none'` if omitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    query: Union[str, float, bool, DefaultType]
+    analyzer: Union[str, DefaultType]
+    auto_generate_synonyms_phrase_query: Union[bool, DefaultType]
+    cutoff_frequency: Union[float, DefaultType]
+    fuzziness: Union[str, int, DefaultType]
+    fuzzy_rewrite: Union[str, DefaultType]
+    fuzzy_transpositions: Union[bool, DefaultType]
+    lenient: Union[bool, DefaultType]
+    max_expansions: Union[int, DefaultType]
+    minimum_should_match: Union[int, str, DefaultType]
+    operator: Union[Literal["and", "or"], DefaultType]
+    prefix_length: Union[int, DefaultType]
+    zero_terms_query: Union[Literal["all", "none"], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        query: Union[str, float, bool, DefaultType] = DEFAULT,
+        analyzer: Union[str, DefaultType] = DEFAULT,
+        auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT,
+        cutoff_frequency: Union[float, DefaultType] = DEFAULT,
+        fuzziness: Union[str, int, DefaultType] = DEFAULT,
+        fuzzy_rewrite: Union[str, DefaultType] = DEFAULT,
+        fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT,
+        lenient: Union[bool, DefaultType] = DEFAULT,
+        max_expansions: Union[int, DefaultType] = DEFAULT,
+        minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+        operator: Union[Literal["and", "or"], DefaultType] = DEFAULT,
+        prefix_length: Union[int, DefaultType] = DEFAULT,
+        zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if analyzer is not DEFAULT:
+            kwargs["analyzer"] = analyzer
+        if auto_generate_synonyms_phrase_query is not DEFAULT:
+            kwargs["auto_generate_synonyms_phrase_query"] = (
+                auto_generate_synonyms_phrase_query
+            )
+        if cutoff_frequency is not DEFAULT:
+            kwargs["cutoff_frequency"] = cutoff_frequency
+        if fuzziness is not DEFAULT:
+            kwargs["fuzziness"] = fuzziness
+        if fuzzy_rewrite is not DEFAULT:
+            kwargs["fuzzy_rewrite"] = fuzzy_rewrite
+        if fuzzy_transpositions is not DEFAULT:
+            kwargs["fuzzy_transpositions"] = fuzzy_transpositions
+        if lenient is not DEFAULT:
+            kwargs["lenient"] = lenient
+        if max_expansions is not DEFAULT:
+            kwargs["max_expansions"] = max_expansions
+        if minimum_should_match is not DEFAULT:
+            kwargs["minimum_should_match"] = minimum_should_match
+        if operator is not DEFAULT:
+            kwargs["operator"] = operator
+        if prefix_length is not DEFAULT:
+            kwargs["prefix_length"] = prefix_length
+        if zero_terms_query is not DEFAULT:
+            kwargs["zero_terms_query"] = zero_terms_query
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class MultiTermLookup(AttrDict[Any]):
+    """
+    :arg field: (required) A field from which to retrieve terms.
+    :arg missing: The value to apply to documents that do not have a
+        value. By default, documents without a value are ignored.
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    missing: Union[str, int, float, bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        missing: Union[str, int, float, bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if missing is not DEFAULT:
+            kwargs["missing"] = missing
+        super().__init__(kwargs)
+
+
+class MutualInformationHeuristic(AttrDict[Any]):
+    """
+    :arg background_is_superset: Set to `false` if you defined a custom
+        background filter that represents a different set of documents
+        that you want to compare to.
+    :arg include_negatives: Set to `false` to filter out the terms that
+        appear less often in the subset than in documents outside the
+        subset.
+    """
+
+    background_is_superset: Union[bool, DefaultType]
+    include_negatives: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        background_is_superset: Union[bool, DefaultType] = DEFAULT,
+        include_negatives: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if background_is_superset is not DEFAULT:
+            kwargs["background_is_superset"] = background_is_superset
+        if include_negatives is not DEFAULT:
+            kwargs["include_negatives"] = include_negatives
+        super().__init__(kwargs)
+
+
+class NestedSortValue(AttrDict[Any]):
+    """
+    :arg path: (required)
+    :arg filter:
+    :arg max_children:
+    :arg nested:
+    """
+
+    path: Union[str, InstrumentedField, DefaultType]
+    filter: Union[Query, DefaultType]
+    max_children: Union[int, DefaultType]
+    nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        filter: Union[Query, DefaultType] = DEFAULT,
+        max_children: Union[int, DefaultType] = DEFAULT,
+        nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if path is not DEFAULT:
+            kwargs["path"] = str(path)
+        if filter is not DEFAULT:
+            kwargs["filter"] = filter
+        if max_children is not DEFAULT:
+            kwargs["max_children"] = max_children
+        if nested is not DEFAULT:
+            kwargs["nested"] = nested
+        super().__init__(kwargs)
+
+
+class NumericFielddata(AttrDict[Any]):
+    """
+    :arg format: (required)
+    """
+
+    format: Union[Literal["array", "disabled"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        format: Union[Literal["array", "disabled"], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if format is not DEFAULT:
+            kwargs["format"] = format
+        super().__init__(kwargs)
+
+
+class PValueHeuristic(AttrDict[Any]):
+    """
+    :arg background_is_superset:
+    :arg normalize_above: Whether the results should be normalized when
+        above the given value. Allows for consistent significance results
+        at various scales. Note: `0` is a special value which means no
+        normalization.
+    """
+
+    background_is_superset: Union[bool, DefaultType]
+    normalize_above: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        background_is_superset: Union[bool, DefaultType] = DEFAULT,
+        normalize_above: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if background_is_superset is not DEFAULT:
+            kwargs["background_is_superset"] = background_is_superset
+        if normalize_above is not DEFAULT:
+            kwargs["normalize_above"] = normalize_above
+        super().__init__(kwargs)
+
+
+class PercentageScoreHeuristic(AttrDict[Any]):
+    pass
+
+
+class PinnedDoc(AttrDict[Any]):
+    """
+    :arg _id: (required) The unique document ID.
+    :arg _index: The index that contains the document.
+    """
+
+    _id: Union[str, DefaultType]
+    _index: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        _id: Union[str, DefaultType] = DEFAULT,
+        _index: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _id is not DEFAULT:
+            kwargs["_id"] = _id
+        if _index is not DEFAULT:
+            kwargs["_index"] = _index
+        super().__init__(kwargs)
+
+
+class PrefixQuery(AttrDict[Any]):
+    """
+    :arg value: (required) Beginning characters of terms you wish to find
+        in the provided field.
+    :arg rewrite: Method used to rewrite the query.
+    :arg case_insensitive: Allows ASCII case insensitive matching of the
+        value with the indexed field values when set to `true`. Default is
+        `false` which means the case sensitivity of matching depends on
+        the underlying field’s mapping.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    value: Union[str, DefaultType]
+    rewrite: Union[str, DefaultType]
+    case_insensitive: Union[bool, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        value: Union[str, DefaultType] = DEFAULT,
+        rewrite: Union[str, DefaultType] = DEFAULT,
+        case_insensitive: Union[bool, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if rewrite is not DEFAULT:
+            kwargs["rewrite"] = rewrite
+        if case_insensitive is not DEFAULT:
+            kwargs["case_insensitive"] = case_insensitive
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
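+# A minimal usage sketch for the class above (illustrative only; the value
+# "elast" and the boost are placeholders). Keyword arguments are collected
+# into the wrapped mapping, so the object can be used wherever a prefix query
+# body is expected:
+#
+#     PrefixQuery(value="elast", case_insensitive=True, boost=1.2)
+#     # roughly {"value": "elast", "case_insensitive": True, "boost": 1.2}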
+
+class QueryVectorBuilder(AttrDict[Any]):
+    """
+    :arg text_embedding:
+    """
+
+    text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if text_embedding is not DEFAULT:
+            kwargs["text_embedding"] = text_embedding
+        super().__init__(kwargs)
+
+
+class RankFeatureFunctionLinear(AttrDict[Any]):
+    pass
+
+
+class RankFeatureFunctionLogarithm(AttrDict[Any]):
+    """
+    :arg scaling_factor: (required) Configurable scaling factor.
+    """
+
+    scaling_factor: Union[float, DefaultType]
+
+    def __init__(
+        self, *, scaling_factor: Union[float, DefaultType] = DEFAULT, **kwargs: Any
+    ):
+        if scaling_factor is not DEFAULT:
+            kwargs["scaling_factor"] = scaling_factor
+        super().__init__(kwargs)
+
+
+class RankFeatureFunctionSaturation(AttrDict[Any]):
+    """
+    :arg pivot: Configurable pivot value so that the result will be less
+        than 0.5.
+    """
+
+    pivot: Union[float, DefaultType]
+
+    def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any):
+        if pivot is not DEFAULT:
+            kwargs["pivot"] = pivot
+        super().__init__(kwargs)
+
+
+class RankFeatureFunctionSigmoid(AttrDict[Any]):
+    """
+    :arg pivot: (required) Configurable pivot value so that the result
+        will be less than 0.5.
+    :arg exponent: (required) Configurable exponent.
+    """
+
+    pivot: Union[float, DefaultType]
+    exponent: Union[float, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        pivot: Union[float, DefaultType] = DEFAULT,
+        exponent: Union[float, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if pivot is not DEFAULT:
+            kwargs["pivot"] = pivot
+        if exponent is not DEFAULT:
+            kwargs["exponent"] = exponent
+        super().__init__(kwargs)
+
+
+class RegexpQuery(AttrDict[Any]):
+    """
+    :arg value: (required) Regular expression for terms you wish to find
+        in the provided field.
+    :arg case_insensitive: Allows case insensitive matching of the regular
+        expression value with the indexed field values when set to `true`.
+        When `false`, case sensitivity of matching depends on the
+        underlying field’s mapping.
+    :arg flags: Enables optional operators for the regular expression.
+    :arg max_determinized_states: Maximum number of automaton states
+        required for the query. Defaults to `10000` if omitted.
+    :arg rewrite: Method used to rewrite the query.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    value: Union[str, DefaultType]
+    case_insensitive: Union[bool, DefaultType]
+    flags: Union[str, DefaultType]
+    max_determinized_states: Union[int, DefaultType]
+    rewrite: Union[str, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        value: Union[str, DefaultType] = DEFAULT,
+        case_insensitive: Union[bool, DefaultType] = DEFAULT,
+        flags: Union[str, DefaultType] = DEFAULT,
+        max_determinized_states: Union[int, DefaultType] = DEFAULT,
+        rewrite: Union[str, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if case_insensitive is not DEFAULT:
+            kwargs["case_insensitive"] = case_insensitive
+        if flags is not DEFAULT:
+            kwargs["flags"] = flags
+        if max_determinized_states is not DEFAULT:
+            kwargs["max_determinized_states"] = max_determinized_states
+        if rewrite is not DEFAULT:
+            kwargs["rewrite"] = rewrite
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
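+# Illustrative sketch for the class above. The pattern is a placeholder and
+# `flags="ALL"` assumes the Elasticsearch regexp flag syntax:
+#
+#     RegexpQuery(value="elastic.*", flags="ALL", case_insensitive=True)
+#     # roughly {"value": "elastic.*", "flags": "ALL", "case_insensitive": True}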
+
+class RegressionInferenceOptions(AttrDict[Any]):
+    """
+    :arg results_field: The field that is added to incoming documents to
+        contain the inference prediction. Defaults to `predicted_value`.
+    :arg num_top_feature_importance_values: Specifies the maximum number
+        of feature importance values per document.
+    """
+
+    results_field: Union[str, InstrumentedField, DefaultType]
+    num_top_feature_importance_values: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        results_field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if results_field is not DEFAULT:
+            kwargs["results_field"] = str(results_field)
+        if num_top_feature_importance_values is not DEFAULT:
+            kwargs["num_top_feature_importance_values"] = (
+                num_top_feature_importance_values
+            )
+        super().__init__(kwargs)
+
+
+class RescoreVector(AttrDict[Any]):
+    """
+    :arg oversample: (required) Applies the specified oversample factor to
+        `k` on the approximate kNN search.
+    """
+
+    oversample: Union[float, DefaultType]
+
+    def __init__(
+        self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any
+    ):
+        if oversample is not DEFAULT:
+            kwargs["oversample"] = oversample
+        super().__init__(kwargs)
+
+
+class ScoreSort(AttrDict[Any]):
+    """
+    :arg order:
+    """
+
+    order: Union[Literal["asc", "desc"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if order is not DEFAULT:
+            kwargs["order"] = order
+        super().__init__(kwargs)
+
+
+class Script(AttrDict[Any]):
+    """
+    :arg source: The script source.
+    :arg id: The `id` for a stored script.
+    :arg params: Specifies any named parameters that are passed into the
+        script as variables. Use parameters instead of hard-coded values
+        to decrease compile time.
+    :arg lang: Specifies the language the script is written in. Defaults
+        to `painless` if omitted.
+    :arg options:
+    """
+
+    source: Union[str, Dict[str, Any], DefaultType]
+    id: Union[str, DefaultType]
+    params: Union[Mapping[str, Any], DefaultType]
+    lang: Union[Literal["painless", "expression", "mustache", "java"], DefaultType]
+    options: Union[Mapping[str, str], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        source: Union[str, Dict[str, Any], DefaultType] = DEFAULT,
+        id: Union[str, DefaultType] = DEFAULT,
+        params: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+        lang: Union[
+            Literal["painless", "expression", "mustache", "java"], DefaultType
+        ] = DEFAULT,
+        options: Union[Mapping[str, str], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if source is not DEFAULT:
+            kwargs["source"] = source
+        if id is not DEFAULT:
+            kwargs["id"] = id
+        if params is not DEFAULT:
+            kwargs["params"] = params
+        if lang is not DEFAULT:
+            kwargs["lang"] = lang
+        if options is not DEFAULT:
+            kwargs["options"] = options
+        super().__init__(kwargs)
+
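+# Illustrative sketch: a Painless script with named parameters, following the
+# docstring's advice to prefer parameters over hard-coded values (the field
+# name "price" and the factor are placeholders):
+#
+#     Script(
+#         source="doc['price'].value * params.factor",
+#         params={"factor": 1.2},
+#         lang="painless",
+#     )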
+
+class ScriptField(AttrDict[Any]):
+    """
+    :arg script: (required)
+    :arg ignore_failure:
+    """
+
+    script: Union["Script", Dict[str, Any], DefaultType]
+    ignore_failure: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+        ignore_failure: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if ignore_failure is not DEFAULT:
+            kwargs["ignore_failure"] = ignore_failure
+        super().__init__(kwargs)
+
+
+class ScriptSort(AttrDict[Any]):
+    """
+    :arg script: (required)
+    :arg order:
+    :arg type:
+    :arg mode:
+    :arg nested:
+    """
+
+    script: Union["Script", Dict[str, Any], DefaultType]
+    order: Union[Literal["asc", "desc"], DefaultType]
+    type: Union[Literal["string", "number", "version"], DefaultType]
+    mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType]
+    nested: Union["NestedSortValue", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+        order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT,
+        type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT,
+        mode: Union[
+            Literal["min", "max", "sum", "avg", "median"], DefaultType
+        ] = DEFAULT,
+        nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if order is not DEFAULT:
+            kwargs["order"] = order
+        if type is not DEFAULT:
+            kwargs["type"] = type
+        if mode is not DEFAULT:
+            kwargs["mode"] = mode
+        if nested is not DEFAULT:
+            kwargs["nested"] = nested
+        super().__init__(kwargs)
+
+
+class ScriptedHeuristic(AttrDict[Any]):
+    """
+    :arg script: (required)
+    """
+
+    script: Union["Script", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        super().__init__(kwargs)
+
+
+class SemanticTextIndexOptions(AttrDict[Any]):
+    """
+    :arg dense_vector:
+    :arg sparse_vector:
+    """
+
+    dense_vector: Union["DenseVectorIndexOptions", Dict[str, Any], DefaultType]
+    sparse_vector: Union["SparseVectorIndexOptions", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        dense_vector: Union[
+            "DenseVectorIndexOptions", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        sparse_vector: Union[
+            "SparseVectorIndexOptions", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if dense_vector is not DEFAULT:
+            kwargs["dense_vector"] = dense_vector
+        if sparse_vector is not DEFAULT:
+            kwargs["sparse_vector"] = sparse_vector
+        super().__init__(kwargs)
+
+
+class ShapeFieldQuery(AttrDict[Any]):
+    """
+    :arg indexed_shape: Queries using a pre-indexed shape.
+    :arg relation: Spatial relation between the query shape and the
+        document shape.
+    :arg shape: Queries using an inline shape definition in GeoJSON or
+        Well Known Text (WKT) format.
+    """
+
+    indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType]
+    relation: Union[
+        Literal["intersects", "disjoint", "within", "contains"], DefaultType
+    ]
+    shape: Any
+
+    def __init__(
+        self,
+        *,
+        indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT,
+        relation: Union[
+            Literal["intersects", "disjoint", "within", "contains"], DefaultType
+        ] = DEFAULT,
+        shape: Any = DEFAULT,
+        **kwargs: Any,
+    ):
+        if indexed_shape is not DEFAULT:
+            kwargs["indexed_shape"] = indexed_shape
+        if relation is not DEFAULT:
+            kwargs["relation"] = relation
+        if shape is not DEFAULT:
+            kwargs["shape"] = shape
+        super().__init__(kwargs)
+
+
+class SortOptions(AttrDict[Any]):
+    """
+    :arg _field: The field to use in this query.
+    :arg _value: The query value for the field.
+    :arg _score:
+    :arg _doc:
+    :arg _geo_distance:
+    :arg _script:
+    """
+
+    _field: Union[str, "InstrumentedField", "DefaultType"]
+    _value: Union["FieldSort", Dict[str, Any], "DefaultType"]
+    _score: Union["ScoreSort", Dict[str, Any], DefaultType]
+    _doc: Union["ScoreSort", Dict[str, Any], DefaultType]
+    _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType]
+    _script: Union["ScriptSort", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
+        _value: Union["FieldSort", Dict[str, Any], "DefaultType"] = DEFAULT,
+        *,
+        _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
+        _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT,
+        _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT,
+        _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        if _score is not DEFAULT:
+            kwargs["_score"] = _score
+        if _doc is not DEFAULT:
+            kwargs["_doc"] = _doc
+        if _geo_distance is not DEFAULT:
+            kwargs["_geo_distance"] = _geo_distance
+        if _script is not DEFAULT:
+            kwargs["_script"] = _script
+        super().__init__(kwargs)
+
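+# Illustrative sketch: because `_field` and `_value` are positional, a sort on
+# a single field can be written compactly. The plain-dict sort value is used
+# to stay close to the raw request body ("price" is a placeholder):
+#
+#     SortOptions("price", {"order": "desc"})
+#     # roughly {"price": {"order": "desc"}}
+#
+#     SortOptions(_score=ScoreSort(order="desc"))
+#     # roughly {"_score": {"order": "desc"}} once serialized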
+
+class SourceFilter(AttrDict[Any]):
+    """
+    :arg exclude_vectors: If `true`, vector fields are excluded from the
+        returned source.  This option takes precedence over `includes`:
+        any vector field will remain excluded even if it matches an
+        `includes` rule.
+    :arg excludes: A list of fields to exclude from the returned source.
+    :arg includes: A list of fields to include in the returned source.
+    """
+
+    exclude_vectors: Union[bool, DefaultType]
+    excludes: Union[
+        Union[str, InstrumentedField],
+        Sequence[Union[str, InstrumentedField]],
+        DefaultType,
+    ]
+    includes: Union[
+        Union[str, InstrumentedField],
+        Sequence[Union[str, InstrumentedField]],
+        DefaultType,
+    ]
+
+    def __init__(
+        self,
+        *,
+        exclude_vectors: Union[bool, DefaultType] = DEFAULT,
+        excludes: Union[
+            Union[str, InstrumentedField],
+            Sequence[Union[str, InstrumentedField]],
+            DefaultType,
+        ] = DEFAULT,
+        includes: Union[
+            Union[str, InstrumentedField],
+            Sequence[Union[str, InstrumentedField]],
+            DefaultType,
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if exclude_vectors is not DEFAULT:
+            kwargs["exclude_vectors"] = exclude_vectors
+        if excludes is not DEFAULT:
+            kwargs["excludes"] = str(excludes)
+        if includes is not DEFAULT:
+            kwargs["includes"] = str(includes)
+        super().__init__(kwargs)
+
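+# Illustrative sketch: exclude vector fields from the returned source and
+# drop one additional pattern (the pattern is a placeholder; a single string
+# is used because string values pass through unchanged):
+#
+#     SourceFilter(exclude_vectors=True, excludes="metadata.*")
+#     # roughly {"exclude_vectors": True, "excludes": "metadata.*"}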
+
+class SpanContainingQuery(AttrDict[Any]):
+    """
+    :arg big: (required) Can be any span query. Matching spans from `big`
+        that contain matches from `little` are returned.
+    :arg little: (required) Can be any span query. Matching spans from
+        `big` that contain matches from `little` are returned.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    big: Union["SpanQuery", Dict[str, Any], DefaultType]
+    little: Union["SpanQuery", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if big is not DEFAULT:
+            kwargs["big"] = big
+        if little is not DEFAULT:
+            kwargs["little"] = little
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanFieldMaskingQuery(AttrDict[Any]):
+    """
+    :arg field: (required)
+    :arg query: (required)
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    query: Union["SpanQuery", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        query: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if query is not DEFAULT:
+            kwargs["query"] = query
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanFirstQuery(AttrDict[Any]):
+    """
+    :arg end: (required) Controls the maximum end position permitted in a
+        match.
+    :arg match: (required) Can be any other span type query.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    end: Union[int, DefaultType]
+    match: Union["SpanQuery", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        end: Union[int, DefaultType] = DEFAULT,
+        match: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if end is not DEFAULT:
+            kwargs["end"] = end
+        if match is not DEFAULT:
+            kwargs["match"] = match
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanMultiTermQuery(AttrDict[Any]):
+    """
+    :arg match: (required) Should be a multi term query (one of
+        `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query).
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    match: Union[Query, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        match: Union[Query, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if match is not DEFAULT:
+            kwargs["match"] = match
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanNearQuery(AttrDict[Any]):
+    """
+    :arg clauses: (required) Array of one or more other span type queries.
+    :arg in_order: Controls whether matches are required to be in-order.
+    :arg slop: Controls the maximum number of intervening unmatched
+        positions permitted.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
+    in_order: Union[bool, DefaultType]
+    slop: Union[int, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        clauses: Union[
+            Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
+        ] = DEFAULT,
+        in_order: Union[bool, DefaultType] = DEFAULT,
+        slop: Union[int, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if clauses is not DEFAULT:
+            kwargs["clauses"] = clauses
+        if in_order is not DEFAULT:
+            kwargs["in_order"] = in_order
+        if slop is not DEFAULT:
+            kwargs["slop"] = slop
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
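+# Illustrative sketch: two span_term clauses that must appear within two
+# positions of each other, in order. The plain-dict clause form accepted by
+# the constructor is used; "title", "quick" and "fox" are placeholders:
+#
+#     SpanNearQuery(
+#         clauses=[
+#             {"span_term": {"title": {"value": "quick"}}},
+#             {"span_term": {"title": {"value": "fox"}}},
+#         ],
+#         slop=2,
+#         in_order=True,
+#     )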
+
+class SpanNotQuery(AttrDict[Any]):
+    """
+    :arg exclude: (required) Span query whose matches must not overlap
+        those returned.
+    :arg include: (required) Span query whose matches are filtered.
+    :arg dist: The number of tokens from within the include span that
+        can’t have overlap with the exclude span. Equivalent to setting
+        both `pre` and `post`.
+    :arg post: The number of tokens after the include span that can’t have
+        overlap with the exclude span.
+    :arg pre: The number of tokens before the include span that can’t have
+        overlap with the exclude span.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    exclude: Union["SpanQuery", Dict[str, Any], DefaultType]
+    include: Union["SpanQuery", Dict[str, Any], DefaultType]
+    dist: Union[int, DefaultType]
+    post: Union[int, DefaultType]
+    pre: Union[int, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        dist: Union[int, DefaultType] = DEFAULT,
+        post: Union[int, DefaultType] = DEFAULT,
+        pre: Union[int, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if exclude is not DEFAULT:
+            kwargs["exclude"] = exclude
+        if include is not DEFAULT:
+            kwargs["include"] = include
+        if dist is not DEFAULT:
+            kwargs["dist"] = dist
+        if post is not DEFAULT:
+            kwargs["post"] = post
+        if pre is not DEFAULT:
+            kwargs["pre"] = pre
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanOrQuery(AttrDict[Any]):
+    """
+    :arg clauses: (required) Array of one or more other span type queries.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        clauses: Union[
+            Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
+        ] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if clauses is not DEFAULT:
+            kwargs["clauses"] = clauses
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanQuery(AttrDict[Any]):
+    """
+    :arg span_containing: Accepts a list of span queries, but only returns
+        those spans which also match a second span query.
+    :arg span_field_masking: Allows queries like `span_near` or `span_or`
+        across different fields.
+    :arg span_first: Accepts another span query whose matches must appear
+        within the first N positions of the field.
+    :arg span_gap:
+    :arg span_multi: Wraps a `term`, `range`, `prefix`, `wildcard`,
+        `regexp`, or `fuzzy` query.
+    :arg span_near: Accepts multiple span queries whose matches must be
+        within the specified distance of each other, and possibly in the
+        same order.
+    :arg span_not: Wraps another span query, and excludes any documents
+        which match that query.
+    :arg span_or: Combines multiple span queries and returns documents
+        which match any of the specified queries.
+    :arg span_term: The equivalent of the `term` query but for use with
+        other span queries.
+    :arg span_within: The result from a single span query is returned as
+        long as its span falls within the spans returned by a list of
+        other span queries.
+    """
+
+    span_containing: Union["SpanContainingQuery", Dict[str, Any], DefaultType]
+    span_field_masking: Union["SpanFieldMaskingQuery", Dict[str, Any], DefaultType]
+    span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType]
+    span_gap: Union[Mapping[Union[str, InstrumentedField], int], DefaultType]
+    span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType]
+    span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType]
+    span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType]
+    span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType]
+    span_term: Union[
+        Mapping[Union[str, InstrumentedField], "SpanTermQuery"],
+        Dict[str, Any],
+        DefaultType,
+    ]
+    span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        span_containing: Union[
+            "SpanContainingQuery", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        span_field_masking: Union[
+            "SpanFieldMaskingQuery", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        span_gap: Union[
+            Mapping[Union[str, InstrumentedField], int], DefaultType
+        ] = DEFAULT,
+        span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        span_term: Union[
+            Mapping[Union[str, InstrumentedField], "SpanTermQuery"],
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if span_containing is not DEFAULT:
+            kwargs["span_containing"] = span_containing
+        if span_field_masking is not DEFAULT:
+            kwargs["span_field_masking"] = span_field_masking
+        if span_first is not DEFAULT:
+            kwargs["span_first"] = span_first
+        if span_gap is not DEFAULT:
+            kwargs["span_gap"] = str(span_gap)
+        if span_multi is not DEFAULT:
+            kwargs["span_multi"] = span_multi
+        if span_near is not DEFAULT:
+            kwargs["span_near"] = span_near
+        if span_not is not DEFAULT:
+            kwargs["span_not"] = span_not
+        if span_or is not DEFAULT:
+            kwargs["span_or"] = span_or
+        if span_term is not DEFAULT:
+            kwargs["span_term"] = str(span_term)
+        if span_within is not DEFAULT:
+            kwargs["span_within"] = span_within
+        super().__init__(kwargs)
+
+
+class SpanTermQuery(AttrDict[Any]):
+    """
+    :arg value: (required)
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    value: Union[int, float, str, bool, None, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        value: Union[int, float, str, bool, None, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SpanWithinQuery(AttrDict[Any]):
+    """
+    :arg big: (required) Can be any span query. Matching spans from
+        `little` that are enclosed within `big` are returned.
+    :arg little: (required) Can be any span query. Matching spans from
+        `little` that are enclosed within `big` are returned.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    big: Union["SpanQuery", Dict[str, Any], DefaultType]
+    little: Union["SpanQuery", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if big is not DEFAULT:
+            kwargs["big"] = big
+        if little is not DEFAULT:
+            kwargs["little"] = little
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class SparseVectorIndexOptions(AttrDict[Any]):
+    """
+    :arg prune: Whether to perform pruning, omitting the non-significant
+        tokens from the query to improve query performance. If `prune` is
+        `true` but `pruning_config` is not specified, pruning will occur
+        using default values. Defaults to `false` if omitted.
+    :arg pruning_config: Optional pruning configuration. If enabled, this
+        will omit non-significant tokens from the query in order to
+        improve query performance. This is only used if prune is set to
+        true. If prune is set to true but pruning_config is not specified,
+        default values will be used.
+    """
+
+    prune: Union[bool, DefaultType]
+    pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        prune: Union[bool, DefaultType] = DEFAULT,
+        pruning_config: Union[
+            "TokenPruningConfig", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if prune is not DEFAULT:
+            kwargs["prune"] = prune
+        if pruning_config is not DEFAULT:
+            kwargs["pruning_config"] = pruning_config
+        super().__init__(kwargs)
+
+
+class SuggestContext(AttrDict[Any]):
+    """
+    :arg name: (required)
+    :arg type: (required)
+    :arg path:
+    :arg precision:
+    """
+
+    name: Union[str, DefaultType]
+    type: Union[str, DefaultType]
+    path: Union[str, InstrumentedField, DefaultType]
+    precision: Union[int, str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        name: Union[str, DefaultType] = DEFAULT,
+        type: Union[str, DefaultType] = DEFAULT,
+        path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        precision: Union[int, str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if name is not DEFAULT:
+            kwargs["name"] = name
+        if type is not DEFAULT:
+            kwargs["type"] = type
+        if path is not DEFAULT:
+            kwargs["path"] = str(path)
+        if precision is not DEFAULT:
+            kwargs["precision"] = precision
+        super().__init__(kwargs)
+
+
+class TDigest(AttrDict[Any]):
+    """
+    :arg compression: Limits the maximum number of nodes used by the
+        underlying TDigest algorithm to `20 * compression`, enabling
+        control of memory usage and approximation error.
+    :arg execution_hint: The default implementation of TDigest is
+        optimized for performance, scaling to millions or even billions of
+        sample values while maintaining acceptable accuracy levels (close
+        to 1% relative error for millions of samples in some cases). To
+        use an implementation optimized for accuracy, set this parameter
+        to high_accuracy instead. Defaults to `default` if omitted.
+    """
+
+    compression: Union[int, DefaultType]
+    execution_hint: Union[Literal["default", "high_accuracy"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        compression: Union[int, DefaultType] = DEFAULT,
+        execution_hint: Union[
+            Literal["default", "high_accuracy"], DefaultType
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if compression is not DEFAULT:
+            kwargs["compression"] = compression
+        if execution_hint is not DEFAULT:
+            kwargs["execution_hint"] = execution_hint
+        super().__init__(kwargs)
+
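+# Illustrative sketch: trading memory for accuracy in a percentiles
+# aggregation's tdigest settings (the compression value is a placeholder):
+#
+#     TDigest(compression=200, execution_hint="high_accuracy")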
+
+class TermQuery(AttrDict[Any]):
+    """
+    :arg value: (required) Term you wish to find in the provided field.
+    :arg case_insensitive: Allows ASCII case insensitive matching of the
+        value with the indexed field values when set to `true`. When
+        `false`, the case sensitivity of matching depends on the
+        underlying field’s mapping.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    value: Union[int, float, str, bool, None, DefaultType]
+    case_insensitive: Union[bool, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        value: Union[int, float, str, bool, None, DefaultType] = DEFAULT,
+        case_insensitive: Union[bool, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if case_insensitive is not DEFAULT:
+            kwargs["case_insensitive"] = case_insensitive
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
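+# Illustrative sketch: an exact-value term query with ASCII case folding
+# enabled (the value "genuine" is a placeholder):
+#
+#     TermQuery(value="genuine", case_insensitive=True)
+#     # roughly {"value": "genuine", "case_insensitive": True}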
+
+class TermsLookup(AttrDict[Any]):
+    """
+    :arg index: (required)
+    :arg id: (required)
+    :arg path: (required)
+    :arg routing:
+    """
+
+    index: Union[str, DefaultType]
+    id: Union[str, DefaultType]
+    path: Union[str, InstrumentedField, DefaultType]
+    routing: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        index: Union[str, DefaultType] = DEFAULT,
+        id: Union[str, DefaultType] = DEFAULT,
+        path: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        routing: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if index is not DEFAULT:
+            kwargs["index"] = index
+        if id is not DEFAULT:
+            kwargs["id"] = id
+        if path is not DEFAULT:
+            kwargs["path"] = str(path)
+        if routing is not DEFAULT:
+            kwargs["routing"] = routing
+        super().__init__(kwargs)
+
+
+class TermsPartition(AttrDict[Any]):
+    """
+    :arg num_partitions: (required) The number of partitions.
+    :arg partition: (required) The partition number for this request.
+    """
+
+    num_partitions: Union[int, DefaultType]
+    partition: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        num_partitions: Union[int, DefaultType] = DEFAULT,
+        partition: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if num_partitions is not DEFAULT:
+            kwargs["num_partitions"] = num_partitions
+        if partition is not DEFAULT:
+            kwargs["partition"] = partition
+        super().__init__(kwargs)
+
+
+class TermsSetQuery(AttrDict[Any]):
+    """
+    :arg terms: (required) Array of terms you wish to find in the provided
+        field.
+    :arg minimum_should_match: Specification describing number of matching
+        terms required to return a document.
+    :arg minimum_should_match_field: Numeric field containing the number
+        of matching terms required to return a document.
+    :arg minimum_should_match_script: Custom script containing the number
+        of matching terms required to return a document.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    terms: Union[Sequence[Union[int, float, str, bool, None]], DefaultType]
+    minimum_should_match: Union[int, str, DefaultType]
+    minimum_should_match_field: Union[str, InstrumentedField, DefaultType]
+    minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        terms: Union[
+            Sequence[Union[int, float, str, bool, None]], DefaultType
+        ] = DEFAULT,
+        minimum_should_match: Union[int, str, DefaultType] = DEFAULT,
+        minimum_should_match_field: Union[
+            str, InstrumentedField, DefaultType
+        ] = DEFAULT,
+        minimum_should_match_script: Union[
+            "Script", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if terms is not DEFAULT:
+            kwargs["terms"] = terms
+        if minimum_should_match is not DEFAULT:
+            kwargs["minimum_should_match"] = minimum_should_match
+        if minimum_should_match_field is not DEFAULT:
+            kwargs["minimum_should_match_field"] = str(minimum_should_match_field)
+        if minimum_should_match_script is not DEFAULT:
+            kwargs["minimum_should_match_script"] = minimum_should_match_script
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
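+# Illustrative sketch: require the number of matching terms recorded in a
+# per-document numeric field ("required_matches" and the terms are
+# placeholders):
+#
+#     TermsSetQuery(
+#         terms=["python", "java", "go"],
+#         minimum_should_match_field="required_matches",
+#     )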
+
+class TestPopulation(AttrDict[Any]):
+    """
+    :arg field: (required) The field to aggregate.
+    :arg script:
+    :arg filter: A filter used to define a set of records to run unpaired
+        t-test on.
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    script: Union["Script", Dict[str, Any], DefaultType]
+    filter: Union[Query, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+        filter: Union[Query, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        if filter is not DEFAULT:
+            kwargs["filter"] = filter
+        super().__init__(kwargs)
+
+
+class TextEmbedding(AttrDict[Any]):
+    """
+    :arg model_text: (required)
+    :arg model_id: The model ID is required for all `dense_vector` fields
+        but may be inferred for `semantic_text` fields.
+    """
+
+    model_text: Union[str, DefaultType]
+    model_id: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        model_text: Union[str, DefaultType] = DEFAULT,
+        model_id: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if model_text is not DEFAULT:
+            kwargs["model_text"] = model_text
+        if model_id is not DEFAULT:
+            kwargs["model_id"] = model_id
+        super().__init__(kwargs)
+
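+# Illustrative sketch: a query vector built from text at search time. The
+# model ID is a placeholder and, per the docstring above, may be omitted for
+# semantic_text fields:
+#
+#     QueryVectorBuilder(
+#         text_embedding=TextEmbedding(
+#             model_text="how do I reset my password?",
+#             model_id="my-embedding-model",
+#         )
+#     )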
+
+class TextExpansionQuery(AttrDict[Any]):
+    """
+    :arg model_id: (required) The text expansion NLP model to use
+    :arg model_text: (required) The query text
+    :arg pruning_config: Token pruning configurations
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    model_id: Union[str, DefaultType]
+    model_text: Union[str, DefaultType]
+    pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        model_id: Union[str, DefaultType] = DEFAULT,
+        model_text: Union[str, DefaultType] = DEFAULT,
+        pruning_config: Union[
+            "TokenPruningConfig", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if model_id is not DEFAULT:
+            kwargs["model_id"] = model_id
+        if model_text is not DEFAULT:
+            kwargs["model_text"] = model_text
+        if pruning_config is not DEFAULT:
+            kwargs["pruning_config"] = pruning_config
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
+
+
+class TextIndexPrefixes(AttrDict[Any]):
+    """
+    :arg max_chars: (required)
+    :arg min_chars: (required)
+    """
+
+    max_chars: Union[int, DefaultType]
+    min_chars: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        max_chars: Union[int, DefaultType] = DEFAULT,
+        min_chars: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if max_chars is not DEFAULT:
+            kwargs["max_chars"] = max_chars
+        if min_chars is not DEFAULT:
+            kwargs["min_chars"] = min_chars
+        super().__init__(kwargs)
+
+
+class TokenPruningConfig(AttrDict[Any]):
+    """
+    :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than
+        this threshold times the average frequency of all tokens in the
+        specified field are considered outliers and pruned. Defaults to
+        `5` if omitted.
+    :arg tokens_weight_threshold: Tokens whose weight is less than this
+        threshold are considered nonsignificant and pruned. Defaults to
+        `0.4` if omitted.
+    :arg only_score_pruned_tokens: Whether to score only the pruned
+        tokens, rather than only the kept tokens.
+    """
+
+    tokens_freq_ratio_threshold: Union[int, DefaultType]
+    tokens_weight_threshold: Union[float, DefaultType]
+    only_score_pruned_tokens: Union[bool, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        tokens_freq_ratio_threshold: Union[int, DefaultType] = DEFAULT,
+        tokens_weight_threshold: Union[float, DefaultType] = DEFAULT,
+        only_score_pruned_tokens: Union[bool, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if tokens_freq_ratio_threshold is not DEFAULT:
+            kwargs["tokens_freq_ratio_threshold"] = tokens_freq_ratio_threshold
+        if tokens_weight_threshold is not DEFAULT:
+            kwargs["tokens_weight_threshold"] = tokens_weight_threshold
+        if only_score_pruned_tokens is not DEFAULT:
+            kwargs["only_score_pruned_tokens"] = only_score_pruned_tokens
+        super().__init__(kwargs)
+
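+# Illustrative sketch: the documented defaults made explicit, suitable for
+# passing as `pruning_config` to the sparse-vector style queries in this
+# module:
+#
+#     TokenPruningConfig(
+#         tokens_freq_ratio_threshold=5,
+#         tokens_weight_threshold=0.4,
+#         only_score_pruned_tokens=False,
+#     )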
+
+class TopLeftBottomRightGeoBounds(AttrDict[Any]):
+    """
+    :arg top_left: (required)
+    :arg bottom_right: (required)
+    """
+
+    top_left: Union[
+        "LatLonGeoLocation",
+        "GeoHashLocation",
+        Sequence[float],
+        str,
+        Dict[str, Any],
+        DefaultType,
+    ]
+    bottom_right: Union[
+        "LatLonGeoLocation",
+        "GeoHashLocation",
+        Sequence[float],
+        str,
+        Dict[str, Any],
+        DefaultType,
+    ]
+
+    def __init__(
+        self,
+        *,
+        top_left: Union[
+            "LatLonGeoLocation",
+            "GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        bottom_right: Union[
+            "LatLonGeoLocation",
+            "GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if top_left is not DEFAULT:
+            kwargs["top_left"] = top_left
+        if bottom_right is not DEFAULT:
+            kwargs["bottom_right"] = bottom_right
+        super().__init__(kwargs)
+
+
+class TopMetricsValue(AttrDict[Any]):
+    """
+    :arg field: (required) A field to return as a metric.
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        super().__init__(kwargs)
+
+
+class TopRightBottomLeftGeoBounds(AttrDict[Any]):
+    """
+    :arg top_right: (required)
+    :arg bottom_left: (required)
+    """
+
+    top_right: Union[
+        "LatLonGeoLocation",
+        "GeoHashLocation",
+        Sequence[float],
+        str,
+        Dict[str, Any],
+        DefaultType,
+    ]
+    bottom_left: Union[
+        "LatLonGeoLocation",
+        "GeoHashLocation",
+        Sequence[float],
+        str,
+        Dict[str, Any],
+        DefaultType,
+    ]
+
+    def __init__(
+        self,
+        *,
+        top_right: Union[
+            "LatLonGeoLocation",
+            "GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        bottom_left: Union[
+            "LatLonGeoLocation",
+            "GeoHashLocation",
+            Sequence[float],
+            str,
+            Dict[str, Any],
+            DefaultType,
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if top_right is not DEFAULT:
+            kwargs["top_right"] = top_right
+        if bottom_left is not DEFAULT:
+            kwargs["bottom_left"] = bottom_left
+        super().__init__(kwargs)
+
+
+class WeightedAverageValue(AttrDict[Any]):
+    """
+    :arg field: The field from which to extract the values or weights.
+    :arg missing: A value or weight to use if the field is missing.
+    :arg script:
+    """
+
+    field: Union[str, InstrumentedField, DefaultType]
+    missing: Union[float, DefaultType]
+    script: Union["Script", Dict[str, Any], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        field: Union[str, InstrumentedField, DefaultType] = DEFAULT,
+        missing: Union[float, DefaultType] = DEFAULT,
+        script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if field is not DEFAULT:
+            kwargs["field"] = str(field)
+        if missing is not DEFAULT:
+            kwargs["missing"] = missing
+        if script is not DEFAULT:
+            kwargs["script"] = script
+        super().__init__(kwargs)
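+
+
+# Illustrative sketch (not part of the generated spec): value and weight
+# descriptors for a `weighted_avg` aggregation, using hypothetical field names.
+#
+#     WeightedAverageValue(field="grade", missing=2)
+#     WeightedAverageValue(field="weight")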
+
+
+class WeightedTokensQuery(AttrDict[Any]):
+    """
+    :arg tokens: (required) The tokens representing this query
+    :arg pruning_config: Token pruning configurations
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    tokens: Union[Mapping[str, float], Sequence[Mapping[str, float]], DefaultType]
+    pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        tokens: Union[
+            Mapping[str, float], Sequence[Mapping[str, float]], DefaultType
+        ] = DEFAULT,
+        pruning_config: Union[
+            "TokenPruningConfig", Dict[str, Any], DefaultType
+        ] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if tokens is not DEFAULT:
+            kwargs["tokens"] = tokens
+        if pruning_config is not DEFAULT:
+            kwargs["pruning_config"] = pruning_config
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
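+
+
+# Illustrative sketch (not part of the generated spec): options for a
+# `weighted_tokens` query; the token names and weights below are made up.
+#
+#     WeightedTokensQuery(
+#         tokens={"pizza": 1.2, "oven": 0.8},
+#         pruning_config=TokenPruningConfig(tokens_freq_ratio_threshold=5),
+#     )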
+
+
+class WildcardQuery(AttrDict[Any]):
+    """
+    :arg case_insensitive: Allows case insensitive matching of the pattern
+        with the indexed field values when set to true. Default is false,
+        which means the case sensitivity of matching depends on the
+        underlying field’s mapping.
+    :arg rewrite: Method used to rewrite the query.
+    :arg value: Wildcard pattern for terms you wish to find in the
+        provided field. Required when `wildcard` is not set.
+    :arg wildcard: Wildcard pattern for terms you wish to find in the
+        provided field. Required when `value` is not set.
+    :arg boost: Floating point number used to decrease or increase the
+        relevance scores of the query. Boost values are relative to the
+        default value of 1.0. A boost value between 0 and 1.0 decreases
+        the relevance score. A value greater than 1.0 increases the
+        relevance score. Defaults to `1` if omitted.
+    :arg _name:
+    """
+
+    case_insensitive: Union[bool, DefaultType]
+    rewrite: Union[str, DefaultType]
+    value: Union[str, DefaultType]
+    wildcard: Union[str, DefaultType]
+    boost: Union[float, DefaultType]
+    _name: Union[str, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        case_insensitive: Union[bool, DefaultType] = DEFAULT,
+        rewrite: Union[str, DefaultType] = DEFAULT,
+        value: Union[str, DefaultType] = DEFAULT,
+        wildcard: Union[str, DefaultType] = DEFAULT,
+        boost: Union[float, DefaultType] = DEFAULT,
+        _name: Union[str, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if case_insensitive is not DEFAULT:
+            kwargs["case_insensitive"] = case_insensitive
+        if rewrite is not DEFAULT:
+            kwargs["rewrite"] = rewrite
+        if value is not DEFAULT:
+            kwargs["value"] = value
+        if wildcard is not DEFAULT:
+            kwargs["wildcard"] = wildcard
+        if boost is not DEFAULT:
+            kwargs["boost"] = boost
+        if _name is not DEFAULT:
+            kwargs["_name"] = _name
+        super().__init__(kwargs)
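+
+
+# Illustrative sketch (not part of the generated spec): options for a `wildcard`
+# query matching terms that start with "elast", ignoring case.
+#
+#     WildcardQuery(value="elast*", case_insensitive=True)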
+
+
+class WktGeoBounds(AttrDict[Any]):
+    """
+    :arg wkt: (required)
+    """
+
+    wkt: Union[str, DefaultType]
+
+    def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any):
+        if wkt is not DEFAULT:
+            kwargs["wkt"] = wkt
+        super().__init__(kwargs)
+
+
+class AdjacencyMatrixAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["AdjacencyMatrixBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "AdjacencyMatrixBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class AdjacencyMatrixBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: str
+    doc_count: int
+
+
+class AggregationBreakdown(AttrDict[Any]):
+    """
+    :arg build_aggregation: (required)
+    :arg build_aggregation_count: (required)
+    :arg build_leaf_collector: (required)
+    :arg build_leaf_collector_count: (required)
+    :arg collect: (required)
+    :arg collect_count: (required)
+    :arg initialize: (required)
+    :arg initialize_count: (required)
+    :arg reduce: (required)
+    :arg reduce_count: (required)
+    :arg post_collection:
+    :arg post_collection_count:
+    """
+
+    build_aggregation: int
+    build_aggregation_count: int
+    build_leaf_collector: int
+    build_leaf_collector_count: int
+    collect: int
+    collect_count: int
+    initialize: int
+    initialize_count: int
+    reduce: int
+    reduce_count: int
+    post_collection: int
+    post_collection_count: int
+
+
+class AggregationProfile(AttrDict[Any]):
+    """
+    :arg breakdown: (required)
+    :arg description: (required)
+    :arg time_in_nanos: (required)
+    :arg type: (required)
+    :arg debug:
+    :arg children:
+    """
+
+    breakdown: "AggregationBreakdown"
+    description: str
+    time_in_nanos: Any
+    type: str
+    debug: "AggregationProfileDebug"
+    children: Sequence["AggregationProfile"]
+
+
+class AggregationProfileDebug(AttrDict[Any]):
+    """
+    :arg segments_with_multi_valued_ords:
+    :arg collection_strategy:
+    :arg segments_with_single_valued_ords:
+    :arg total_buckets:
+    :arg built_buckets:
+    :arg result_strategy:
+    :arg has_filter:
+    :arg delegate:
+    :arg delegate_debug:
+    :arg chars_fetched:
+    :arg extract_count:
+    :arg extract_ns:
+    :arg values_fetched:
+    :arg collect_analyzed_ns:
+    :arg collect_analyzed_count:
+    :arg surviving_buckets:
+    :arg ordinals_collectors_used:
+    :arg ordinals_collectors_overhead_too_high:
+    :arg string_hashing_collectors_used:
+    :arg numeric_collectors_used:
+    :arg empty_collectors_used:
+    :arg deferred_aggregators:
+    :arg segments_with_doc_count_field:
+    :arg segments_with_deleted_docs:
+    :arg filters:
+    :arg segments_counted:
+    :arg segments_collected:
+    :arg map_reducer:
+    :arg brute_force_used:
+    :arg dynamic_pruning_attempted:
+    :arg dynamic_pruning_used:
+    :arg skipped_due_to_no_data:
+    """
+
+    segments_with_multi_valued_ords: int
+    collection_strategy: str
+    segments_with_single_valued_ords: int
+    total_buckets: int
+    built_buckets: int
+    result_strategy: str
+    has_filter: bool
+    delegate: str
+    delegate_debug: "AggregationProfileDebug"
+    chars_fetched: int
+    extract_count: int
+    extract_ns: int
+    values_fetched: int
+    collect_analyzed_ns: int
+    collect_analyzed_count: int
+    surviving_buckets: int
+    ordinals_collectors_used: int
+    ordinals_collectors_overhead_too_high: int
+    string_hashing_collectors_used: int
+    numeric_collectors_used: int
+    empty_collectors_used: int
+    deferred_aggregators: Sequence[str]
+    segments_with_doc_count_field: int
+    segments_with_deleted_docs: int
+    filters: Sequence["AggregationProfileDelegateDebugFilter"]
+    segments_counted: int
+    segments_collected: int
+    map_reducer: str
+    brute_force_used: int
+    dynamic_pruning_attempted: int
+    dynamic_pruning_used: int
+    skipped_due_to_no_data: int
+
+
+class AggregationProfileDelegateDebugFilter(AttrDict[Any]):
+    """
+    :arg results_from_metadata:
+    :arg query:
+    :arg specialized_for:
+    :arg segments_counted_in_constant_time:
+    """
+
+    results_from_metadata: int
+    query: str
+    specialized_for: str
+    segments_counted_in_constant_time: int
+
+
+class ArrayPercentilesItem(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg value: (required)
+    :arg value_as_string:
+    """
+
+    key: float
+    value: Union[float, None]
+    value_as_string: str
+
+
+class AutoDateHistogramAggregate(AttrDict[Any]):
+    """
+    :arg interval: (required)
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    interval: str
+    buckets: Sequence["DateHistogramBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class AvgAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class BoxPlotAggregate(AttrDict[Any]):
+    """
+    :arg min: (required)
+    :arg max: (required)
+    :arg q1: (required)
+    :arg q2: (required)
+    :arg q3: (required)
+    :arg lower: (required)
+    :arg upper: (required)
+    :arg min_as_string:
+    :arg max_as_string:
+    :arg q1_as_string:
+    :arg q2_as_string:
+    :arg q3_as_string:
+    :arg lower_as_string:
+    :arg upper_as_string:
+    :arg meta:
+    """
+
+    min: float
+    max: float
+    q1: float
+    q2: float
+    q3: float
+    lower: float
+    upper: float
+    min_as_string: str
+    max_as_string: str
+    q1_as_string: str
+    q2_as_string: str
+    q3_as_string: str
+    lower_as_string: str
+    upper_as_string: str
+    meta: Mapping[str, Any]
+
+
+class BucketMetricValueAggregate(AttrDict[Any]):
+    """
+    :arg keys: (required)
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    keys: Sequence[str]  # type: ignore[assignment]
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class BulkIndexByScrollFailure(AttrDict[Any]):
+    """
+    :arg cause: (required)
+    :arg id: (required)
+    :arg index: (required)
+    :arg status: (required)
+    """
+
+    cause: "ErrorCause"
+    id: str
+    index: str
+    status: int
+
+
+class CardinalityAggregate(AttrDict[Any]):
+    """
+    :arg value: (required)
+    :arg meta:
+    """
+
+    value: int
+    meta: Mapping[str, Any]
+
+
+class CartesianBoundsAggregate(AttrDict[Any]):
+    """
+    :arg bounds:
+    :arg meta:
+    """
+
+    bounds: "TopLeftBottomRightGeoBounds"
+    meta: Mapping[str, Any]
+
+
+class CartesianCentroidAggregate(AttrDict[Any]):
+    """
+    :arg count: (required)
+    :arg location:
+    :arg meta:
+    """
+
+    count: int
+    location: "CartesianPoint"
+    meta: Mapping[str, Any]
+
+
+class CartesianPoint(AttrDict[Any]):
+    """
+    :arg x: (required)
+    :arg y: (required)
+    """
+
+    x: float
+    y: float
+
+
+class ChangePointAggregate(AttrDict[Any]):
+    """
+    :arg type: (required)
+    :arg bucket:
+    :arg meta:
+    """
+
+    type: "ChangeType"
+    bucket: "ChangePointBucket"
+    meta: Mapping[str, Any]
+
+
+class ChangePointBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: Union[int, float, str, bool, None]
+    doc_count: int
+
+
+class ChangeType(AttrDict[Any]):
+    """
+    :arg dip:
+    :arg distribution_change:
+    :arg indeterminable:
+    :arg non_stationary:
+    :arg spike:
+    :arg stationary:
+    :arg step_change:
+    :arg trend_change:
+    """
+
+    dip: "Dip"
+    distribution_change: "DistributionChange"
+    indeterminable: "Indeterminable"
+    non_stationary: "NonStationary"
+    spike: "Spike"
+    stationary: "Stationary"
+    step_change: "StepChange"
+    trend_change: "TrendChange"
+
+
+class ChildrenAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class ClusterDetails(AttrDict[Any]):
+    """
+    :arg status: (required)
+    :arg indices: (required)
+    :arg timed_out: (required)
+    :arg took:
+    :arg _shards:
+    :arg failures:
+    """
+
+    status: Literal["running", "successful", "partial", "skipped", "failed"]
+    indices: str
+    timed_out: bool
+    took: Any
+    _shards: "ShardStatistics"
+    failures: Sequence["ShardFailure"]
+
+
+class ClusterStatistics(AttrDict[Any]):
+    """
+    :arg skipped: (required)
+    :arg successful: (required)
+    :arg total: (required)
+    :arg running: (required)
+    :arg partial: (required)
+    :arg failed: (required)
+    :arg details:
+    """
+
+    skipped: int
+    successful: int
+    total: int
+    running: int
+    partial: int
+    failed: int
+    details: Mapping[str, "ClusterDetails"]
+
+
+class Collector(AttrDict[Any]):
+    """
+    :arg name: (required)
+    :arg reason: (required)
+    :arg time_in_nanos: (required)
+    :arg children:
+    """
+
+    name: str
+    reason: str
+    time_in_nanos: Any
+    children: Sequence["Collector"]
+
+
+class CompletionSuggest(AttrDict[Any]):
+    """
+    :arg options: (required)
+    :arg length: (required)
+    :arg offset: (required)
+    :arg text: (required)
+    """
+
+    options: Sequence["CompletionSuggestOption"]
+    length: int
+    offset: int
+    text: str
+
+
+class CompletionSuggestOption(AttrDict[Any]):
+    """
+    :arg text: (required)
+    :arg collate_match:
+    :arg contexts:
+    :arg fields:
+    :arg _id:
+    :arg _index:
+    :arg _routing:
+    :arg _score:
+    :arg _source:
+    :arg score:
+    """
+
+    text: str
+    collate_match: bool
+    contexts: Mapping[
+        str,
+        Sequence[
+            Union[
+                str, Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+            ]
+        ],
+    ]
+    fields: Mapping[str, Any]
+    _id: str
+    _index: str
+    _routing: str
+    _score: float
+    _source: Any
+    score: float
+
+
+class CompositeAggregate(AttrDict[Any]):
+    """
+    :arg after_key:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    after_key: Mapping[str, Union[int, float, str, bool, None]]
+    buckets: Sequence["CompositeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "CompositeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class CompositeBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: Mapping[str, Union[int, float, str, bool, None]]
+    doc_count: int
+
+
+class CumulativeCardinalityAggregate(AttrDict[Any]):
+    """
+    Result of the `cumulative_cardinality` aggregation
+
+    :arg value: (required)
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: int
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class DateHistogramAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["DateHistogramBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]:
+        return self.buckets  # type: ignore[return-value]
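+
+
+# Illustrative sketch (not part of the generated spec): reading the buckets of a
+# `date_histogram` aggregation from a hypothetical search response `resp`, where
+# the aggregation was named "per_day".
+#
+#     for bucket in resp.aggregations.per_day.buckets:
+#         print(bucket.key_as_string, bucket.doc_count)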
+
+
+class DateHistogramBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    """
+
+    key: Any
+    doc_count: int
+    key_as_string: str
+
+
+class DateRangeAggregate(AttrDict[Any]):
+    """
+    Result of a `date_range` aggregation. Same format as for a `range`
+    aggregation: `from` and `to` in `buckets` are milliseconds since the
+    Epoch, represented as a floating point number.
+
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["RangeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class DerivativeAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg normalized_value:
+    :arg normalized_value_as_string:
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    normalized_value: float
+    normalized_value_as_string: str
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class DfsKnnProfile(AttrDict[Any]):
+    """
+    :arg query: (required)
+    :arg rewrite_time: (required)
+    :arg collector: (required)
+    :arg vector_operations_count:
+    """
+
+    query: Sequence["KnnQueryProfileResult"]
+    rewrite_time: int
+    collector: Sequence["KnnCollectorResult"]
+    vector_operations_count: int
+
+
+class DfsProfile(AttrDict[Any]):
+    """
+    :arg statistics:
+    :arg knn:
+    """
+
+    statistics: "DfsStatisticsProfile"
+    knn: Sequence["DfsKnnProfile"]
+
+
+class DfsStatisticsBreakdown(AttrDict[Any]):
+    """
+    :arg collection_statistics: (required)
+    :arg collection_statistics_count: (required)
+    :arg create_weight: (required)
+    :arg create_weight_count: (required)
+    :arg rewrite: (required)
+    :arg rewrite_count: (required)
+    :arg term_statistics: (required)
+    :arg term_statistics_count: (required)
+    """
+
+    collection_statistics: int
+    collection_statistics_count: int
+    create_weight: int
+    create_weight_count: int
+    rewrite: int
+    rewrite_count: int
+    term_statistics: int
+    term_statistics_count: int
+
+
+class DfsStatisticsProfile(AttrDict[Any]):
+    """
+    :arg type: (required)
+    :arg description: (required)
+    :arg time_in_nanos: (required)
+    :arg breakdown: (required)
+    :arg time:
+    :arg debug:
+    :arg children:
+    """
+
+    type: str
+    description: str
+    time_in_nanos: Any
+    breakdown: "DfsStatisticsBreakdown"
+    time: Any
+    debug: Mapping[str, Any]
+    children: Sequence["DfsStatisticsProfile"]
+
+
+class Dip(AttrDict[Any]):
+    """
+    :arg p_value: (required)
+    :arg change_point: (required)
+    """
+
+    p_value: float
+    change_point: int
+
+
+class DistributionChange(AttrDict[Any]):
+    """
+    :arg p_value: (required)
+    :arg change_point: (required)
+    """
+
+    p_value: float
+    change_point: int
+
+
+class DoubleTermsAggregate(AttrDict[Any]):
+    """
+    Result of a `terms` aggregation when the field is some kind of decimal
+    number like a float, double, or distance.
+
+    :arg doc_count_error_upper_bound:
+    :arg sum_other_doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    doc_count_error_upper_bound: int
+    sum_other_doc_count: int
+    buckets: Sequence["DoubleTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "DoubleTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class DoubleTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    :arg doc_count_error_upper_bound:
+    """
+
+    key: float
+    doc_count: int
+    key_as_string: str
+    doc_count_error_upper_bound: int
+
+
+class ErrorCause(AttrDict[Any]):
+    """
+    Cause and details about a request failure. This class defines the
+    properties common to all error types. Additional details that depend
+    on the error type are also provided.
+
+    :arg type: (required) The type of error
+    :arg reason: A human-readable explanation of the error, in English.
+    :arg stack_trace: The server stack trace. Present only if the
+        `error_trace=true` parameter was sent with the request.
+    :arg caused_by:
+    :arg root_cause:
+    :arg suppressed:
+    """
+
+    type: str
+    reason: Union[str, None]
+    stack_trace: str
+    caused_by: "ErrorCause"
+    root_cause: Sequence["ErrorCause"]
+    suppressed: Sequence["ErrorCause"]
+
+
+class Explanation(AttrDict[Any]):
+    """
+    :arg description: (required)
+    :arg details: (required)
+    :arg value: (required)
+    """
+
+    description: str
+    details: Sequence["ExplanationDetail"]
+    value: float
+
+
+class ExplanationDetail(AttrDict[Any]):
+    """
+    :arg description: (required)
+    :arg value: (required)
+    :arg details:
+    """
+
+    description: str
+    value: float
+    details: Sequence["ExplanationDetail"]
+
+
+class ExtendedStatsAggregate(AttrDict[Any]):
+    """
+    :arg sum_of_squares: (required)
+    :arg variance: (required)
+    :arg variance_population: (required)
+    :arg variance_sampling: (required)
+    :arg std_deviation: (required)
+    :arg std_deviation_population: (required)
+    :arg std_deviation_sampling: (required)
+    :arg count: (required)
+    :arg min: (required)
+    :arg max: (required)
+    :arg avg: (required)
+    :arg sum: (required)
+    :arg std_deviation_bounds:
+    :arg sum_of_squares_as_string:
+    :arg variance_as_string:
+    :arg variance_population_as_string:
+    :arg variance_sampling_as_string:
+    :arg std_deviation_as_string:
+    :arg std_deviation_bounds_as_string:
+    :arg min_as_string:
+    :arg max_as_string:
+    :arg avg_as_string:
+    :arg sum_as_string:
+    :arg meta:
+    """
+
+    sum_of_squares: Union[float, None]
+    variance: Union[float, None]
+    variance_population: Union[float, None]
+    variance_sampling: Union[float, None]
+    std_deviation: Union[float, None]
+    std_deviation_population: Union[float, None]
+    std_deviation_sampling: Union[float, None]
+    count: int
+    min: Union[float, None]
+    max: Union[float, None]
+    avg: Union[float, None]
+    sum: float
+    std_deviation_bounds: "StandardDeviationBounds"
+    sum_of_squares_as_string: str
+    variance_as_string: str
+    variance_population_as_string: str
+    variance_sampling_as_string: str
+    std_deviation_as_string: str
+    std_deviation_bounds_as_string: "StandardDeviationBoundsAsString"
+    min_as_string: str
+    max_as_string: str
+    avg_as_string: str
+    sum_as_string: str
+    meta: Mapping[str, Any]
+
+
+class ExtendedStatsBucketAggregate(AttrDict[Any]):
+    """
+    :arg sum_of_squares: (required)
+    :arg variance: (required)
+    :arg variance_population: (required)
+    :arg variance_sampling: (required)
+    :arg std_deviation: (required)
+    :arg std_deviation_population: (required)
+    :arg std_deviation_sampling: (required)
+    :arg count: (required)
+    :arg min: (required)
+    :arg max: (required)
+    :arg avg: (required)
+    :arg sum: (required)
+    :arg std_deviation_bounds:
+    :arg sum_of_squares_as_string:
+    :arg variance_as_string:
+    :arg variance_population_as_string:
+    :arg variance_sampling_as_string:
+    :arg std_deviation_as_string:
+    :arg std_deviation_bounds_as_string:
+    :arg min_as_string:
+    :arg max_as_string:
+    :arg avg_as_string:
+    :arg sum_as_string:
+    :arg meta:
+    """
+
+    sum_of_squares: Union[float, None]
+    variance: Union[float, None]
+    variance_population: Union[float, None]
+    variance_sampling: Union[float, None]
+    std_deviation: Union[float, None]
+    std_deviation_population: Union[float, None]
+    std_deviation_sampling: Union[float, None]
+    count: int
+    min: Union[float, None]
+    max: Union[float, None]
+    avg: Union[float, None]
+    sum: float
+    std_deviation_bounds: "StandardDeviationBounds"
+    sum_of_squares_as_string: str
+    variance_as_string: str
+    variance_population_as_string: str
+    variance_sampling_as_string: str
+    std_deviation_as_string: str
+    std_deviation_bounds_as_string: "StandardDeviationBoundsAsString"
+    min_as_string: str
+    max_as_string: str
+    avg_as_string: str
+    sum_as_string: str
+    meta: Mapping[str, Any]
+
+
+class FetchProfile(AttrDict[Any]):
+    """
+    :arg type: (required)
+    :arg description: (required)
+    :arg time_in_nanos: (required)
+    :arg breakdown: (required)
+    :arg debug:
+    :arg children:
+    """
+
+    type: str
+    description: str
+    time_in_nanos: Any
+    breakdown: "FetchProfileBreakdown"
+    debug: "FetchProfileDebug"
+    children: Sequence["FetchProfile"]
+
+
+class FetchProfileBreakdown(AttrDict[Any]):
+    """
+    :arg load_source:
+    :arg load_source_count:
+    :arg load_stored_fields:
+    :arg load_stored_fields_count:
+    :arg next_reader:
+    :arg next_reader_count:
+    :arg process_count:
+    :arg process:
+    """
+
+    load_source: int
+    load_source_count: int
+    load_stored_fields: int
+    load_stored_fields_count: int
+    next_reader: int
+    next_reader_count: int
+    process_count: int
+    process: int
+
+
+class FetchProfileDebug(AttrDict[Any]):
+    """
+    :arg stored_fields:
+    :arg fast_path:
+    """
+
+    stored_fields: Sequence[str]
+    fast_path: int
+
+
+class FilterAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class FiltersAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["FiltersBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "FiltersBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class FiltersBucket(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg key:
+    """
+
+    doc_count: int
+    key: str
+
+
+class FrequentItemSetsAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["FrequentItemSetsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "FrequentItemSetsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class FrequentItemSetsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg support: (required)
+    :arg doc_count: (required)
+    """
+
+    key: Mapping[str, Sequence[str]]
+    support: float
+    doc_count: int
+
+
+class GeoBoundsAggregate(AttrDict[Any]):
+    """
+    :arg bounds:
+    :arg meta:
+    """
+
+    bounds: Union[
+        "CoordsGeoBounds",
+        "TopLeftBottomRightGeoBounds",
+        "TopRightBottomLeftGeoBounds",
+        "WktGeoBounds",
+    ]
+    meta: Mapping[str, Any]
+
+
+class GeoCentroidAggregate(AttrDict[Any]):
+    """
+    :arg count: (required)
+    :arg location:
+    :arg meta:
+    """
+
+    count: int
+    location: Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]
+    meta: Mapping[str, Any]
+
+
+class GeoDistanceAggregate(AttrDict[Any]):
+    """
+    Result of a `geo_distance` aggregation. The unit for `from` and `to`
+    is meters by default.
+
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["RangeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class GeoHashGridAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["GeoHashGridBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "GeoHashGridBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class GeoHashGridBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: str
+    doc_count: int
+
+
+class GeoHexGridAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["GeoHexGridBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "GeoHexGridBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class GeoHexGridBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: str
+    doc_count: int
+
+
+class GeoLine(AttrDict[Any]):
+    """
+    A GeoJson GeoLine.
+
+    :arg type: (required) Always `"LineString"`
+    :arg coordinates: (required) Array of `[lon, lat]` coordinates
+    """
+
+    type: str
+    coordinates: Sequence[Sequence[float]]
+
+
+class GeoLineAggregate(AttrDict[Any]):
+    """
+    :arg type: (required)
+    :arg geometry: (required)
+    :arg properties: (required)
+    :arg meta:
+    """
+
+    type: str
+    geometry: "GeoLine"
+    properties: Any
+    meta: Mapping[str, Any]
+
+
+class GeoTileGridAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["GeoTileGridBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "GeoTileGridBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class GeoTileGridBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: str
+    doc_count: int
+
+
+class GlobalAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class HdrPercentileRanksAggregate(AttrDict[Any]):
+    """
+    :arg values: (required)
+    :arg meta:
+    """
+
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
+    meta: Mapping[str, Any]
+
+
+class HdrPercentilesAggregate(AttrDict[Any]):
+    """
+    :arg values: (required)
+    :arg meta:
+    """
+
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
+    meta: Mapping[str, Any]
+
+
+class HistogramAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["HistogramBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "HistogramBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class HistogramBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    """
+
+    key: float
+    doc_count: int
+    key_as_string: str
+
+
+class Hit(AttrDict[Any]):
+    """
+    :arg index: (required)
+    :arg id:
+    :arg score:
+    :arg explanation:
+    :arg fields:
+    :arg highlight:
+    :arg inner_hits:
+    :arg matched_queries:
+    :arg nested:
+    :arg ignored:
+    :arg ignored_field_values:
+    :arg shard:
+    :arg node:
+    :arg routing:
+    :arg source:
+    :arg rank:
+    :arg seq_no:
+    :arg primary_term:
+    :arg version:
+    :arg sort:
+    """
+
+    index: str
+    id: str
+    score: Union[float, None]
+    explanation: "Explanation"
+    fields: Mapping[str, Any]
+    highlight: Mapping[str, Sequence[str]]
+    inner_hits: Mapping[str, "InnerHitsResult"]
+    matched_queries: Union[Sequence[str], Mapping[str, float]]
+    nested: "NestedIdentity"
+    ignored: Sequence[str]
+    ignored_field_values: Mapping[str, Sequence[Any]]
+    shard: str
+    node: str
+    routing: str
+    source: Any
+    rank: int
+    seq_no: int
+    primary_term: int
+    version: int
+    sort: Sequence[Union[int, float, str, bool, None]]
+
+
+class HitsMetadata(AttrDict[Any]):
+    """
+    :arg hits: (required)
+    :arg total: Total hit count information, present only if
+        `track_total_hits` wasn't `false` in the search request.
+    :arg max_score:
+    """
+
+    hits: Sequence["Hit"]
+    total: Union["TotalHits", int]
+    max_score: Union[float, None]
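+
+
+# Illustrative sketch (not part of the generated spec): hit count metadata on a
+# hypothetical search response `resp`.
+#
+#     total = resp.hits.total.value  # may be a lower bound when relation is "gte"
+#     best = resp.hits.max_score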
+
+
+class Indeterminable(AttrDict[Any]):
+    """
+    :arg reason: (required)
+    """
+
+    reason: str
+
+
+class InferenceAggregate(AttrDict[Any]):
+    """
+    :arg value:
+    :arg feature_importance:
+    :arg top_classes:
+    :arg warning:
+    :arg meta:
+    """
+
+    value: Union[int, float, str, bool, None]
+    feature_importance: Sequence["InferenceFeatureImportance"]
+    top_classes: Sequence["InferenceTopClassEntry"]
+    warning: str
+    meta: Mapping[str, Any]
+
+
+class InferenceClassImportance(AttrDict[Any]):
+    """
+    :arg class_name: (required)
+    :arg importance: (required)
+    """
+
+    class_name: str
+    importance: float
+
+
+class InferenceFeatureImportance(AttrDict[Any]):
+    """
+    :arg feature_name: (required)
+    :arg importance:
+    :arg classes:
+    """
+
+    feature_name: str
+    importance: float
+    classes: Sequence["InferenceClassImportance"]
+
+
+class InferenceTopClassEntry(AttrDict[Any]):
+    """
+    :arg class_name: (required)
+    :arg class_probability: (required)
+    :arg class_score: (required)
+    """
+
+    class_name: Union[int, float, str, bool, None]
+    class_probability: float
+    class_score: float
+
+
+class InnerHitsResult(AttrDict[Any]):
+    """
+    :arg hits: (required)
+    """
+
+    hits: "HitsMetadata"
+
+
+class IpPrefixAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["IpPrefixBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "IpPrefixBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class IpPrefixBucket(AttrDict[Any]):
+    """
+    :arg is_ipv6: (required)
+    :arg key: (required)
+    :arg prefix_length: (required)
+    :arg doc_count: (required)
+    :arg netmask:
+    """
+
+    is_ipv6: bool
+    key: str
+    prefix_length: int
+    doc_count: int
+    netmask: str
+
+
+class IpRangeAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["IpRangeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "IpRangeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class IpRangeBucket(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg key:
+    :arg from:
+    :arg to:
+    """
+
+    doc_count: int
+    key: str
+    from_: str
+    to: str
+
+
+class KnnCollectorResult(AttrDict[Any]):
+    """
+    :arg name: (required)
+    :arg reason: (required)
+    :arg time_in_nanos: (required)
+    :arg time:
+    :arg children:
+    """
+
+    name: str
+    reason: str
+    time_in_nanos: Any
+    time: Any
+    children: Sequence["KnnCollectorResult"]
+
+
+class KnnQueryProfileBreakdown(AttrDict[Any]):
+    """
+    :arg advance: (required)
+    :arg advance_count: (required)
+    :arg build_scorer: (required)
+    :arg build_scorer_count: (required)
+    :arg compute_max_score: (required)
+    :arg compute_max_score_count: (required)
+    :arg count_weight: (required)
+    :arg count_weight_count: (required)
+    :arg create_weight: (required)
+    :arg create_weight_count: (required)
+    :arg match: (required)
+    :arg match_count: (required)
+    :arg next_doc: (required)
+    :arg next_doc_count: (required)
+    :arg score: (required)
+    :arg score_count: (required)
+    :arg set_min_competitive_score: (required)
+    :arg set_min_competitive_score_count: (required)
+    :arg shallow_advance: (required)
+    :arg shallow_advance_count: (required)
+    """
+
+    advance: int
+    advance_count: int
+    build_scorer: int
+    build_scorer_count: int
+    compute_max_score: int
+    compute_max_score_count: int
+    count_weight: int
+    count_weight_count: int
+    create_weight: int
+    create_weight_count: int
+    match: int
+    match_count: int
+    next_doc: int
+    next_doc_count: int
+    score: int
+    score_count: int
+    set_min_competitive_score: int
+    set_min_competitive_score_count: int
+    shallow_advance: int
+    shallow_advance_count: int
+
+
+class KnnQueryProfileResult(AttrDict[Any]):
+    """
+    :arg type: (required)
+    :arg description: (required)
+    :arg time_in_nanos: (required)
+    :arg breakdown: (required)
+    :arg time:
+    :arg debug:
+    :arg children:
+    """
+
+    type: str
+    description: str
+    time_in_nanos: Any
+    breakdown: "KnnQueryProfileBreakdown"
+    time: Any
+    debug: Mapping[str, Any]
+    children: Sequence["KnnQueryProfileResult"]
+
+
+class LongRareTermsAggregate(AttrDict[Any]):
+    """
+    Result of the `rare_terms` aggregation when the field is some kind of
+    whole number like an integer, long, or date.
+
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["LongRareTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "LongRareTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class LongRareTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    """
+
+    key: int
+    doc_count: int
+    key_as_string: str
+
+
+class LongTermsAggregate(AttrDict[Any]):
+    """
+    Result of a `terms` aggregation when the field is some kind of whole
+    number like an integer, long, or date.
+
+    :arg doc_count_error_upper_bound:
+    :arg sum_other_doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    doc_count_error_upper_bound: int
+    sum_other_doc_count: int
+    buckets: Sequence["LongTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "LongTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class LongTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    :arg doc_count_error_upper_bound:
+    """
+
+    key: int
+    doc_count: int
+    key_as_string: str
+    doc_count_error_upper_bound: int
+
+
+class MatrixStatsAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg fields:
+    :arg meta:
+    """
+
+    doc_count: int
+    fields: Sequence["MatrixStatsFields"]
+    meta: Mapping[str, Any]
+
+
+class MatrixStatsFields(AttrDict[Any]):
+    """
+    :arg name: (required)
+    :arg count: (required)
+    :arg mean: (required)
+    :arg variance: (required)
+    :arg skewness: (required)
+    :arg kurtosis: (required)
+    :arg covariance: (required)
+    :arg correlation: (required)
+    """
+
+    name: str
+    count: int
+    mean: float
+    variance: float
+    skewness: float
+    kurtosis: float
+    covariance: Mapping[str, float]
+    correlation: Mapping[str, float]
+
+
+class MaxAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class MedianAbsoluteDeviationAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class MinAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class MissingAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class MultiTermsAggregate(AttrDict[Any]):
+    """
+    :arg doc_count_error_upper_bound:
+    :arg sum_other_doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    doc_count_error_upper_bound: int
+    sum_other_doc_count: int
+    buckets: Sequence["MultiTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "MultiTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class MultiTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    :arg doc_count_error_upper_bound:
+    """
+
+    key: Sequence[Union[int, float, str, bool, None]]
+    doc_count: int
+    key_as_string: str
+    doc_count_error_upper_bound: int
+
+
+class NestedAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class NestedIdentity(AttrDict[Any]):
+    """
+    :arg field: (required)
+    :arg offset: (required)
+    :arg _nested:
+    """
+
+    field: str
+    offset: int
+    _nested: "NestedIdentity"
+
+
+class NonStationary(AttrDict[Any]):
+    """
+    :arg p_value: (required)
+    :arg r_value: (required)
+    :arg trend: (required)
+    """
+
+    p_value: float
+    r_value: float
+    trend: str
+
+
+class ParentAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class PercentilesBucketAggregate(AttrDict[Any]):
+    """
+    :arg values: (required)
+    :arg meta:
+    """
+
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
+    meta: Mapping[str, Any]
+
+
+class PhraseSuggest(AttrDict[Any]):
+    """
+    :arg options: (required)
+    :arg length: (required)
+    :arg offset: (required)
+    :arg text: (required)
+    """
+
+    options: Sequence["PhraseSuggestOption"]
+    length: int
+    offset: int
+    text: str
+
+
+class PhraseSuggestOption(AttrDict[Any]):
+    """
+    :arg text: (required)
+    :arg score: (required)
+    :arg highlighted:
+    :arg collate_match:
+    """
+
+    text: str
+    score: float
+    highlighted: str
+    collate_match: bool
+
+
+class Profile(AttrDict[Any]):
+    """
+    :arg shards: (required)
+    """
+
+    shards: Sequence["ShardProfile"]
+
+
+class QueryBreakdown(AttrDict[Any]):
+    """
+    :arg advance: (required)
+    :arg advance_count: (required)
+    :arg build_scorer: (required)
+    :arg build_scorer_count: (required)
+    :arg create_weight: (required)
+    :arg create_weight_count: (required)
+    :arg match: (required)
+    :arg match_count: (required)
+    :arg shallow_advance: (required)
+    :arg shallow_advance_count: (required)
+    :arg next_doc: (required)
+    :arg next_doc_count: (required)
+    :arg score: (required)
+    :arg score_count: (required)
+    :arg compute_max_score: (required)
+    :arg compute_max_score_count: (required)
+    :arg count_weight: (required)
+    :arg count_weight_count: (required)
+    :arg set_min_competitive_score: (required)
+    :arg set_min_competitive_score_count: (required)
+    """
+
+    advance: int
+    advance_count: int
+    build_scorer: int
+    build_scorer_count: int
+    create_weight: int
+    create_weight_count: int
+    match: int
+    match_count: int
+    shallow_advance: int
+    shallow_advance_count: int
+    next_doc: int
+    next_doc_count: int
+    score: int
+    score_count: int
+    compute_max_score: int
+    compute_max_score_count: int
+    count_weight: int
+    count_weight_count: int
+    set_min_competitive_score: int
+    set_min_competitive_score_count: int
+
+
+class QueryProfile(AttrDict[Any]):
+    """
+    :arg breakdown: (required)
+    :arg description: (required)
+    :arg time_in_nanos: (required)
+    :arg type: (required)
+    :arg children:
+    """
+
+    breakdown: "QueryBreakdown"
+    description: str
+    time_in_nanos: Any
+    type: str
+    children: Sequence["QueryProfile"]
+
+
+class RangeAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["RangeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class RangeBucket(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg from:
+    :arg to:
+    :arg from_as_string:
+    :arg to_as_string:
+    :arg key: The bucket key. Present if the aggregation is _not_ keyed
+    """
+
+    doc_count: int
+    from_: float
+    to: float
+    from_as_string: str
+    to_as_string: str
+    key: str
+
+
+class RateAggregate(AttrDict[Any]):
+    """
+    :arg value: (required)
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: float
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class Retries(AttrDict[Any]):
+    """
+    :arg bulk: (required) The number of bulk actions retried.
+    :arg search: (required) The number of search actions retried.
+    """
+
+    bulk: int
+    search: int
+
+
+class ReverseNestedAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class SamplerAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class ScriptedMetricAggregate(AttrDict[Any]):
+    """
+    :arg value: (required)
+    :arg meta:
+    """
+
+    value: Any
+    meta: Mapping[str, Any]
+
+
+class SearchProfile(AttrDict[Any]):
+    """
+    :arg collector: (required)
+    :arg query: (required)
+    :arg rewrite_time: (required)
+    """
+
+    collector: Sequence["Collector"]
+    query: Sequence["QueryProfile"]
+    rewrite_time: int
+
+
+class ShardFailure(AttrDict[Any]):
+    """
+    :arg reason: (required)
+    :arg index:
+    :arg node:
+    :arg shard:
+    :arg status:
+    :arg primary:
+    """
+
+    reason: "ErrorCause"
+    index: str
+    node: str
+    shard: int
+    status: str
+    primary: bool
+
+
+class ShardProfile(AttrDict[Any]):
+    """
+    :arg aggregations: (required)
+    :arg cluster: (required)
+    :arg id: (required)
+    :arg index: (required)
+    :arg node_id: (required)
+    :arg searches: (required)
+    :arg shard_id: (required)
+    :arg dfs:
+    :arg fetch:
+    """
+
+    aggregations: Sequence["AggregationProfile"]
+    cluster: str
+    id: str
+    index: str
+    node_id: str
+    searches: Sequence["SearchProfile"]
+    shard_id: int
+    dfs: "DfsProfile"
+    fetch: "FetchProfile"
+
+
+class ShardStatistics(AttrDict[Any]):
+    """
+    :arg failed: (required) The number of shards the operation or search
+        attempted to run on but failed.
+    :arg successful: (required) The number of shards the operation or
+        search succeeded on.
+    :arg total: (required) The number of shards the operation or search
+        will run on overall.
+    :arg failures:
+    :arg skipped:
+    """
+
+    failed: int
+    successful: int
+    total: int
+    failures: Sequence["ShardFailure"]
+    skipped: int
+
+
+class SignificantLongTermsAggregate(AttrDict[Any]):
+    """
+    :arg bg_count:
+    :arg doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    bg_count: int
+    doc_count: int
+    buckets: Sequence["SignificantLongTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "SignificantLongTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class SignificantLongTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg score: (required)
+    :arg bg_count: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    """
+
+    key: int
+    score: float
+    bg_count: int
+    doc_count: int
+    key_as_string: str
+
+
+class SignificantStringTermsAggregate(AttrDict[Any]):
+    """
+    :arg bg_count:
+    :arg doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    bg_count: int
+    doc_count: int
+    buckets: Sequence["SignificantStringTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "SignificantStringTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class SignificantStringTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg score: (required)
+    :arg bg_count: (required)
+    :arg doc_count: (required)
+    """
+
+    key: str
+    score: float
+    bg_count: int
+    doc_count: int
+
+
+class SimpleValueAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class Spike(AttrDict[Any]):
+    """
+    :arg p_value: (required)
+    :arg change_point: (required)
+    """
+
+    p_value: float
+    change_point: int
+
+
+class StandardDeviationBounds(AttrDict[Any]):
+    """
+    :arg upper: (required)
+    :arg lower: (required)
+    :arg upper_population: (required)
+    :arg lower_population: (required)
+    :arg upper_sampling: (required)
+    :arg lower_sampling: (required)
+    """
+
+    upper: Union[float, None]
+    lower: Union[float, None]
+    upper_population: Union[float, None]
+    lower_population: Union[float, None]
+    upper_sampling: Union[float, None]
+    lower_sampling: Union[float, None]
+
+
+class StandardDeviationBoundsAsString(AttrDict[Any]):
+    """
+    :arg upper: (required)
+    :arg lower: (required)
+    :arg upper_population: (required)
+    :arg lower_population: (required)
+    :arg upper_sampling: (required)
+    :arg lower_sampling: (required)
+    """
+
+    upper: str
+    lower: str
+    upper_population: str
+    lower_population: str
+    upper_sampling: str
+    lower_sampling: str
+
+
+class Stationary(AttrDict[Any]):
+    pass
+
+
+class StatsAggregate(AttrDict[Any]):
+    """
+    Statistics aggregation result. `min`, `max` and `avg` are missing if
+    there were no values to process (`count` is zero).
+
+    :arg count: (required)
+    :arg min: (required)
+    :arg max: (required)
+    :arg avg: (required)
+    :arg sum: (required)
+    :arg min_as_string:
+    :arg max_as_string:
+    :arg avg_as_string:
+    :arg sum_as_string:
+    :arg meta:
+    """
+
+    count: int
+    min: Union[float, None]
+    max: Union[float, None]
+    avg: Union[float, None]
+    sum: float
+    min_as_string: str
+    max_as_string: str
+    avg_as_string: str
+    sum_as_string: str
+    meta: Mapping[str, Any]
+
+
+class StatsBucketAggregate(AttrDict[Any]):
+    """
+    :arg count: (required)
+    :arg min: (required)
+    :arg max: (required)
+    :arg avg: (required)
+    :arg sum: (required)
+    :arg min_as_string:
+    :arg max_as_string:
+    :arg avg_as_string:
+    :arg sum_as_string:
+    :arg meta:
+    """
+
+    count: int
+    min: Union[float, None]
+    max: Union[float, None]
+    avg: Union[float, None]
+    sum: float
+    min_as_string: str
+    max_as_string: str
+    avg_as_string: str
+    sum_as_string: str
+    meta: Mapping[str, Any]
+
+
+class StepChange(AttrDict[Any]):
+    """
+    :arg p_value: (required)
+    :arg change_point: (required)
+    """
+
+    p_value: float
+    change_point: int
+
+
+class StringRareTermsAggregate(AttrDict[Any]):
+    """
+    Result of the `rare_terms` aggregation when the field is a string.
+
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["StringRareTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "StringRareTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class StringRareTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: str
+    doc_count: int
+
+
+class StringStatsAggregate(AttrDict[Any]):
+    """
+    :arg count: (required)
+    :arg min_length: (required)
+    :arg max_length: (required)
+    :arg avg_length: (required)
+    :arg entropy: (required)
+    :arg distribution:
+    :arg min_length_as_string:
+    :arg max_length_as_string:
+    :arg avg_length_as_string:
+    :arg meta:
+    """
+
+    count: int
+    min_length: Union[int, None]
+    max_length: Union[int, None]
+    avg_length: Union[float, None]
+    entropy: Union[float, None]
+    distribution: Union[Mapping[str, float], None]
+    min_length_as_string: str
+    max_length_as_string: str
+    avg_length_as_string: str
+    meta: Mapping[str, Any]
+
+
+class StringTermsAggregate(AttrDict[Any]):
+    """
+    Result of a `terms` aggregation when the field is a string.
+
+    :arg doc_count_error_upper_bound:
+    :arg sum_other_doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    doc_count_error_upper_bound: int
+    sum_other_doc_count: int
+    buckets: Sequence["StringTermsBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "StringTermsBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class StringTermsBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg doc_count_error_upper_bound:
+    """
+
+    key: Union[int, float, str, bool, None]
+    doc_count: int
+    doc_count_error_upper_bound: int
+
+
+class SumAggregate(AttrDict[Any]):
+    """
+    Sum aggregation result. `value` is always present and is zero if there
+    were no values to process.
+
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class TDigestPercentileRanksAggregate(AttrDict[Any]):
+    """
+    :arg values: (required)
+    :arg meta:
+    """
+
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
+    meta: Mapping[str, Any]
+
+
+class TDigestPercentilesAggregate(AttrDict[Any]):
+    """
+    :arg values: (required)
+    :arg meta:
+    """
+
+    values: Union[
+        Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"]
+    ]
+    meta: Mapping[str, Any]
+
+
+class TTestAggregate(AttrDict[Any]):
+    """
+    :arg value: (required)
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class TermSuggest(AttrDict[Any]):
+    """
+    :arg options: (required)
+    :arg length: (required)
+    :arg offset: (required)
+    :arg text: (required)
+    """
+
+    options: Sequence["TermSuggestOption"]
+    length: int
+    offset: int
+    text: str
+
+
+class TermSuggestOption(AttrDict[Any]):
+    """
+    :arg text: (required)
+    :arg score: (required)
+    :arg freq: (required)
+    :arg highlighted:
+    :arg collate_match:
+    """
+
+    text: str
+    score: float
+    freq: int
+    highlighted: str
+    collate_match: bool
+
+
+class TimeSeriesAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["TimeSeriesBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "TimeSeriesBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class TimeSeriesBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: Mapping[str, Union[int, float, str, bool, None]]
+    doc_count: int
+
+
+class TopHitsAggregate(AttrDict[Any]):
+    """
+    :arg hits: (required)
+    :arg meta:
+    """
+
+    hits: "HitsMetadata"
+    meta: Mapping[str, Any]
+
+
+class TopMetrics(AttrDict[Any]):
+    """
+    :arg sort: (required)
+    :arg metrics: (required)
+    """
+
+    sort: Sequence[Union[Union[int, float, str, bool, None], None]]
+    metrics: Mapping[str, Union[Union[int, float, str, bool, None], None]]
+
+
+class TopMetricsAggregate(AttrDict[Any]):
+    """
+    :arg top: (required)
+    :arg meta:
+    """
+
+    top: Sequence["TopMetrics"]
+    meta: Mapping[str, Any]
+
+
+class TotalHits(AttrDict[Any]):
+    """
+    :arg relation: (required)
+    :arg value: (required)
+    """
+
+    relation: Literal["eq", "gte"]
+    value: int
+
+
+class TrendChange(AttrDict[Any]):
+    """
+    :arg p_value: (required)
+    :arg r_value: (required)
+    :arg change_point: (required)
+    """
+
+    p_value: float
+    r_value: float
+    change_point: int
+
+
+class UnmappedRareTermsAggregate(AttrDict[Any]):
+    """
+    Result of a `rare_terms` aggregation when the field is unmapped.
+    `buckets` is always empty.
+
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence[Any]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, Any]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class UnmappedSamplerAggregate(AttrDict[Any]):
+    """
+    :arg doc_count: (required)
+    :arg meta:
+    """
+
+    doc_count: int
+    meta: Mapping[str, Any]
+
+
+class UnmappedSignificantTermsAggregate(AttrDict[Any]):
+    """
+    Result of the `significant_terms` aggregation on an unmapped field.
+    `buckets` is always empty.
+
+    :arg bg_count:
+    :arg doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    bg_count: int
+    doc_count: int
+    buckets: Sequence[Any]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, Any]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class UnmappedTermsAggregate(AttrDict[Any]):
+    """
+    Result of a `terms` aggregation when the field is unmapped. `buckets`
+    is always empty.
+
+    :arg doc_count_error_upper_bound:
+    :arg sum_other_doc_count:
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    doc_count_error_upper_bound: int
+    sum_other_doc_count: int
+    buckets: Sequence[Any]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, Any]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class ValueCountAggregate(AttrDict[Any]):
+    """
+    Value count aggregation result. `value` is always present.
+
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class VariableWidthHistogramAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["VariableWidthHistogramBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "VariableWidthHistogramBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class VariableWidthHistogramBucket(AttrDict[Any]):
+    """
+    :arg min: (required)
+    :arg key: (required)
+    :arg max: (required)
+    :arg doc_count: (required)
+    :arg min_as_string:
+    :arg key_as_string:
+    :arg max_as_string:
+    """
+
+    min: float
+    key: float
+    max: float
+    doc_count: int
+    min_as_string: str
+    key_as_string: str
+    max_as_string: str
+
+
+class WeightedAvgAggregate(AttrDict[Any]):
+    """
+    Weighted average aggregation result. `value` is missing if the weight
+    was set to zero.
+
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: Union[float, None]
+    value_as_string: str
+    meta: Mapping[str, Any]
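+
+
+# Illustrative note, not part of the generated module: these typed classes
+# describe the shape of aggregation results in DSL responses. Assuming a search
+# with a ``sum`` aggregation named "total", the result would typically be read
+# as ``response.aggregations.total.value``, with ``.meta`` holding any
+# user-supplied metadata.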
diff -pruN 8.17.2-2/elasticsearch/dsl/update_by_query.py 9.2.0-1/elasticsearch/dsl/update_by_query.py
--- 8.17.2-2/elasticsearch/dsl/update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,19 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ._async.update_by_query import AsyncUpdateByQuery  # noqa: F401
+from ._sync.update_by_query import UpdateByQuery  # noqa: F401
diff -pruN 8.17.2-2/elasticsearch/dsl/update_by_query_base.py 9.2.0-1/elasticsearch/dsl/update_by_query_base.py
--- 8.17.2-2/elasticsearch/dsl/update_by_query_base.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/update_by_query_base.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,149 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, Dict, Type
+
+from typing_extensions import Self
+
+from .query import Bool, Q
+from .response import UpdateByQueryResponse
+from .search_base import ProxyDescriptor, QueryProxy, Request
+from .utils import _R, recursive_to_dict
+
+
+class UpdateByQueryBase(Request[_R]):
+    query = ProxyDescriptor[Self]("query")
+
+    def __init__(self, **kwargs: Any):
+        """
+        Update by query request to elasticsearch.
+
+        :arg using: `Elasticsearch` instance to use
+        :arg index: limit the search to index
+        :arg doc_type: only query this type.
+
+        All the parameters supplied (or omitted) at creation time can be later
+        overridden by methods (`using`, `index` and `doc_type` respectively).
+
+        """
+        super().__init__(**kwargs)
+        self._response_class = UpdateByQueryResponse[_R]
+        self._script: Dict[str, Any] = {}
+        self._query_proxy = QueryProxy(self, "query")
+
+    def filter(self, *args: Any, **kwargs: Any) -> Self:
+        return self.query(Bool(filter=[Q(*args, **kwargs)]))
+
+    def exclude(self, *args: Any, **kwargs: Any) -> Self:
+        return self.query(Bool(filter=[~Q(*args, **kwargs)]))
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> Self:
+        """
+        Construct a new `UpdateByQuery` instance from a raw dict containing the search
+        body. Useful when migrating from raw dictionaries.
+
+        Example::
+
+            ubq = UpdateByQuery.from_dict({
+                "query": {
+                    "bool": {
+                        "must": [...]
+                    }
+                },
+                "script": {...}
+            })
+            ubq = ubq.filter('term', published=True)
+        """
+        u = cls()
+        u.update_from_dict(d)
+        return u
+
+    def _clone(self) -> Self:
+        """
+        Return a clone of the current search request. Performs a shallow copy
+        of all the underlying objects. Used internally by most state modifying
+        APIs.
+        """
+        ubq = super()._clone()
+
+        ubq._response_class = self._response_class
+        ubq._script = self._script.copy()
+        ubq.query._proxied = self.query._proxied
+        return ubq
+
+    def response_class(self, cls: Type[UpdateByQueryResponse[_R]]) -> Self:
+        """
+        Override the default wrapper used for the response.
+        """
+        ubq = self._clone()
+        ubq._response_class = cls
+        return ubq
+
+    def update_from_dict(self, d: Dict[str, Any]) -> Self:
+        """
+        Apply options from a serialized body to the current instance. Modifies
+        the object in-place. Used mostly by ``from_dict``.
+        """
+        d = d.copy()
+        if "query" in d:
+            self.query._proxied = Q(d.pop("query"))
+        if "script" in d:
+            self._script = d.pop("script")
+        self._extra.update(d)
+        return self
+
+    def script(self, **kwargs: Any) -> Self:
+        """
+        Define the update action to take. See
+        https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html
+        for more details.
+
+        Note: the API only accepts a single script, so calling ``script``
+        multiple times will overwrite the previously set script.
+
+        Example::
+
+            ubq = UpdateByQuery()
+            ubq = ubq.script(source="ctx._source.likes++")
+            ubq = ubq.script(source="ctx._source.likes += params.f",
+                             lang="expression",
+                             params={'f': 3})
+        """
+        ubq = self._clone()
+        if ubq._script:
+            ubq._script = {}
+        ubq._script.update(kwargs)
+        return ubq
+
+    def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
+        """
+        Serialize the update by query request into the dictionary that will be
+        sent over as the request's body.
+
+        All additional keyword arguments will be included into the dictionary.
+        """
+        d = {}
+        if self.query:
+            d["query"] = self.query.to_dict()
+
+        if self._script:
+            d["script"] = self._script
+
+        d.update(recursive_to_dict(self._extra))
+        d.update(recursive_to_dict(kwargs))
+        return d
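+
+
+# A minimal usage sketch, assuming the concrete ``UpdateByQuery`` class
+# exported by the ``update_by_query`` module; the "posts" index and "views"
+# field are made up for illustration:
+#
+#     ubq = (
+#         UpdateByQuery(index="posts")
+#         .filter("term", published=True)
+#         .script(source="ctx._source.views = 0")
+#     )
+#     body = ubq.to_dict()  # {"query": {"bool": {"filter": [...]}}, "script": {...}}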
diff -pruN 8.17.2-2/elasticsearch/dsl/utils.py 9.2.0-1/elasticsearch/dsl/utils.py
--- 8.17.2-2/elasticsearch/dsl/utils.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/utils.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,687 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+import collections.abc
+from copy import copy
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from elastic_transport.client_utils import DEFAULT
+from typing_extensions import Self, TypeAlias, TypeVar
+
+from .exceptions import UnknownDslObject, ValidationException
+
+if TYPE_CHECKING:
+    from elastic_transport import ObjectApiResponse
+
+    from elasticsearch import AsyncElasticsearch, Elasticsearch
+
+    from .document_base import DocumentOptions
+    from .field import Field
+    from .index_base import IndexBase
+    from .response import Hit  # noqa: F401
+    from .types import Hit as HitBaseType
+
+UsingType: TypeAlias = Union[str, "Elasticsearch"]
+AsyncUsingType: TypeAlias = Union[str, "AsyncElasticsearch"]
+AnyUsingType: TypeAlias = Union[str, "Elasticsearch", "AsyncElasticsearch"]
+
+_ValT = TypeVar("_ValT")  # used by AttrDict
+_R = TypeVar("_R", default="Hit")  # used by Search and Response classes
+
+SKIP_VALUES = ("", None)
+EXPAND__TO_DOT = True
+
+DOC_META_FIELDS = frozenset(
+    (
+        "id",
+        "routing",
+    )
+)
+
+META_FIELDS = frozenset(
+    (
+        # Elasticsearch metadata fields, except 'type'
+        "index",
+        "using",
+        "score",
+        "version",
+        "seq_no",
+        "primary_term",
+    )
+).union(DOC_META_FIELDS)
+
+
+def _wrap(val: Any, obj_wrapper: Optional[Callable[[Any], Any]] = None) -> Any:
+    if isinstance(val, dict):
+        return AttrDict(val) if obj_wrapper is None else obj_wrapper(val)
+    if isinstance(val, list):
+        return AttrList(val)
+    return val
+
+
+def _recursive_to_dict(value: Any) -> Any:
+    if hasattr(value, "to_dict"):
+        return value.to_dict()
+    elif isinstance(value, dict) or isinstance(value, AttrDict):
+        return {k: _recursive_to_dict(v) for k, v in value.items()}
+    elif isinstance(value, list) or isinstance(value, AttrList):
+        return [recursive_to_dict(elem) for elem in value]
+    else:
+        return value
+
+
+class AttrList(Generic[_ValT]):
+    def __init__(
+        self, l: List[_ValT], obj_wrapper: Optional[Callable[[_ValT], Any]] = None
+    ):
+        # make iterables into lists
+        if not isinstance(l, list):
+            l = list(l)
+        self._l_ = l
+        self._obj_wrapper = obj_wrapper
+
+    def __repr__(self) -> str:
+        return repr(self._l_)
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, AttrList):
+            return other._l_ == self._l_
+        # make sure we still compare equal to a plain list with the same data
+        return bool(other == self._l_)
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __getitem__(self, k: Union[int, slice]) -> Any:
+        l = self._l_[k]
+        if isinstance(k, slice):
+            return AttrList[_ValT](l, obj_wrapper=self._obj_wrapper)  # type: ignore[arg-type]
+        return _wrap(l, self._obj_wrapper)
+
+    def __setitem__(self, k: int, value: _ValT) -> None:
+        self._l_[k] = value
+
+    def __iter__(self) -> Iterator[Any]:
+        return map(lambda i: _wrap(i, self._obj_wrapper), self._l_)
+
+    def __len__(self) -> int:
+        return len(self._l_)
+
+    def __nonzero__(self) -> bool:
+        return bool(self._l_)
+
+    __bool__ = __nonzero__
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self._l_, name)
+
+    def __getstate__(self) -> Tuple[List[_ValT], Optional[Callable[[_ValT], Any]]]:
+        return self._l_, self._obj_wrapper
+
+    def __setstate__(
+        self, state: Tuple[List[_ValT], Optional[Callable[[_ValT], Any]]]
+    ) -> None:
+        self._l_, self._obj_wrapper = state
+
+    def to_list(self) -> List[_ValT]:
+        return self._l_
+
+
+class AttrDict(Generic[_ValT]):
+    """
+    Helper class to provide attribute-like access (read and write) to
+    dictionaries. Used to provide a convenient way to access both results and
+    nested dsl dicts.
+    """
+
+    _d_: Dict[str, _ValT]
+    RESERVED: Dict[str, str] = {"from_": "from"}
+
+    def __init__(self, d: Dict[str, _ValT]):
+        # assign the inner dict manually to prevent __setattr__ from firing
+        super().__setattr__("_d_", d)
+
+    def __contains__(self, key: object) -> bool:
+        return key in self._d_
+
+    def __nonzero__(self) -> bool:
+        return bool(self._d_)
+
+    __bool__ = __nonzero__
+
+    def __dir__(self) -> List[str]:
+        # introspection for auto-complete in IPython etc
+        return list(self._d_.keys())
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, AttrDict):
+            return other._d_ == self._d_
+        # make sure we still compare equal to a plain dict with the same data
+        return bool(other == self._d_)
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __repr__(self) -> str:
+        r = repr(self._d_)
+        if len(r) > 60:
+            r = r[:60] + "...}"
+        return r
+
+    def __getstate__(self) -> Tuple[Dict[str, _ValT]]:
+        return (self._d_,)
+
+    def __setstate__(self, state: Tuple[Dict[str, _ValT]]) -> None:
+        super().__setattr__("_d_", state[0])
+
+    def __getattr__(self, attr_name: str) -> Any:
+        try:
+            return self.__getitem__(attr_name)
+        except KeyError:
+            raise AttributeError(
+                f"{self.__class__.__name__!r} object has no attribute {attr_name!r}"
+            )
+
+    def __delattr__(self, attr_name: str) -> None:
+        try:
+            del self._d_[self.RESERVED.get(attr_name, attr_name)]
+        except KeyError:
+            raise AttributeError(
+                f"{self.__class__.__name__!r} object has no attribute {attr_name!r}"
+            )
+
+    def __getitem__(self, key: str) -> Any:
+        return _wrap(self._d_[self.RESERVED.get(key, key)])
+
+    def __setitem__(self, key: str, value: _ValT) -> None:
+        self._d_[self.RESERVED.get(key, key)] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self._d_[self.RESERVED.get(key, key)]
+
+    def __setattr__(self, name: str, value: _ValT) -> None:
+        # the __orig_class__ attribute has to be treated as an exception, as
+        # it is added to an object when it is instantiated with type arguments
+        if (
+            name in self._d_ or not hasattr(self.__class__, name)
+        ) and name != "__orig_class__":
+            self._d_[self.RESERVED.get(name, name)] = value
+        else:
+            # there is an attribute on the class (could be property, ..) - don't add it as field
+            super().__setattr__(name, value)
+
+    def __iter__(self) -> Iterator[str]:
+        return iter(self._d_)
+
+    def to_dict(self, recursive: bool = False) -> Dict[str, _ValT]:
+        return cast(
+            Dict[str, _ValT], _recursive_to_dict(self._d_) if recursive else self._d_
+        )
+
+    def keys(self) -> Iterable[str]:
+        return self._d_.keys()
+
+    def items(self) -> Iterable[Tuple[str, _ValT]]:
+        return self._d_.items()
+
+
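+# For illustration only (not part of the module): AttrDict wraps a plain dict
+# and exposes its keys as attributes, wrapping nested dicts and lists on
+# access. Reserved names use a trailing underscore, e.g. ``from_`` maps to the
+# "from" key.
+#
+#     d = AttrDict({"user": {"name": "alice"}, "from": 10})
+#     d.user.name  # "alice"
+#     d.from_      # 10
+#     d.to_dict()  # the underlying dict
+
+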
+class DslMeta(type):
+    """
+    Base Metaclass for DslBase subclasses that builds a registry of all classes
+    for a given DslBase subclass (== all the query types for the Query subclass
+    of DslBase).
+
+    It then uses the information from that registry (as well as `name` and
+    `shortcut` attributes from the base class) to construct any subclass based
+    on its name.
+
+    For typical use see `QueryMeta` and `Query` in `elasticsearch.dsl.query`.
+    """
+
+    name: str
+    _classes: Dict[str, type]
+    _type_name: str
+    _types: ClassVar[Dict[str, Type["DslBase"]]] = {}
+
+    def __init__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]):
+        super().__init__(name, bases, attrs)
+        # skip for DslBase
+        if not hasattr(cls, "_type_shortcut"):
+            return
+        if not cls.name:
+            # abstract base class, register its shortcut
+            cls._types[cls._type_name] = cls._type_shortcut
+            # and create a registry for subclasses
+            if not hasattr(cls, "_classes"):
+                cls._classes = {}
+        elif cls.name not in cls._classes:
+            # normal class, register it
+            cls._classes[cls.name] = cls
+
+    @classmethod
+    def get_dsl_type(cls, name: str) -> Type["DslBase"]:
+        try:
+            return cls._types[name]
+        except KeyError:
+            raise UnknownDslObject(f"DSL type {name} does not exist.")
+
+
+class DslBase(metaclass=DslMeta):
+    """
+    Base class for all DSL objects - queries, filters, aggregations etc. Wraps
+    a dictionary representing the object's json.
+
+    Provides several features:
+        - attribute access to the wrapped dictionary (.field instead of ['field'])
+        - _clone method returning a copy of self
+        - to_dict method to serialize into dict (to be sent via elasticsearch-py)
+        - basic logical operators (&, | and ~) using a Bool(Filter|Query) TODO:
+          move into a class specific for Query/Filter
+        - respects the definition of the class and (de)serializes its
+          attributes based on the `_param_defs` definition (for example turning
+          all values in the `must` attribute into Query objects)
+    """
+
+    _param_defs: ClassVar[Dict[str, Dict[str, Union[str, bool]]]] = {}
+
+    @classmethod
+    def get_dsl_class(
+        cls: Type[Self], name: str, default: Optional[str] = None
+    ) -> Type[Self]:
+        try:
+            return cls._classes[name]
+        except KeyError:
+            if default is not None:
+                return cls._classes[default]
+            raise UnknownDslObject(
+                f"DSL class `{name}` does not exist in {cls._type_name}."
+            )
+
+    def __init__(self, _expand__to_dot: Optional[bool] = None, **params: Any) -> None:
+        if _expand__to_dot is None:
+            _expand__to_dot = EXPAND__TO_DOT
+        self._params: Dict[str, Any] = {}
+        for pname, pvalue in params.items():
+            if pvalue is DEFAULT:
+                continue
+            # expand "__" to dots
+            if "__" in pname and _expand__to_dot:
+                pname = pname.replace("__", ".")
+            # convert instrumented fields to string
+            if type(pvalue).__name__ == "InstrumentedField":
+                pvalue = str(pvalue)
+            self._setattr(pname, pvalue)
+
+    def _repr_params(self) -> str:
+        """Produce a repr of all our parameters to be used in __repr__."""
+        return ", ".join(
+            f"{n.replace('.', '__')}={v!r}"
+            for (n, v) in sorted(self._params.items())
+            # make sure we don't include empty typed params
+            if "type" not in self._param_defs.get(n, {}) or v
+        )
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self._repr_params()})"
+
+    def __eq__(self, other: Any) -> bool:
+        return isinstance(other, self.__class__) and other.to_dict() == self.to_dict()
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name.startswith("_"):
+            return super().__setattr__(name, value)
+        return self._setattr(name, value)
+
+    def _setattr(self, name: str, value: Any) -> None:
+        # if this attribute has special type assigned to it...
+        name = AttrDict.RESERVED.get(name, name)
+        if name in self._param_defs:
+            pinfo = self._param_defs[name]
+
+            if "type" in pinfo:
+                # get the shortcut used to construct this type (query.Q, aggs.A, etc)
+                shortcut = self.__class__.get_dsl_type(str(pinfo["type"]))
+
+                # list of dict(name -> DslBase)
+                if pinfo.get("multi") and pinfo.get("hash"):
+                    if not isinstance(value, (tuple, list)):
+                        value = (value,)
+                    value = list(
+                        {k: shortcut(v) for (k, v) in obj.items()} for obj in value
+                    )
+                elif pinfo.get("multi"):
+                    if not isinstance(value, (tuple, list)):
+                        value = (value,)
+                    value = list(map(shortcut, value))
+
+                # dict(name -> DslBase), make sure we pick up all the objs
+                elif pinfo.get("hash"):
+                    value = {k: shortcut(v) for (k, v) in value.items()}
+
+                # single value object, just convert
+                else:
+                    value = shortcut(value)
+        self._params[name] = value
+
+    def __getattr__(self, name: str) -> Any:
+        if name.startswith("_"):
+            raise AttributeError(
+                f"{self.__class__.__name__!r} object has no attribute {name!r}"
+            )
+
+        value = None
+        try:
+            value = self._params[name]
+        except KeyError:
+            # compound types should never throw AttributeError; return an
+            # empty container instead
+            if name in self._param_defs:
+                pinfo = self._param_defs[name]
+                if pinfo.get("multi"):
+                    value = self._params.setdefault(name, [])
+                elif pinfo.get("hash"):
+                    value = self._params.setdefault(name, {})
+        if value is None:
+            raise AttributeError(
+                f"{self.__class__.__name__!r} object has no attribute {name!r}"
+            )
+
+        # wrap nested dicts in AttrDict for convenient access
+        if isinstance(value, dict):
+            return AttrDict(value)
+        return value
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Serialize the DSL object to plain dict
+        """
+        d = {}
+        for pname, value in self._params.items():
+            pinfo = self._param_defs.get(pname)
+
+            # typed param
+            if pinfo and "type" in pinfo:
+                # don't serialize empty lists and dicts for typed fields
+                if value in ({}, []):
+                    continue
+
+                # list of dict(name -> DslBase)
+                if pinfo.get("multi") and pinfo.get("hash"):
+                    value = list(
+                        {k: v.to_dict() for k, v in obj.items()} for obj in value
+                    )
+
+                # multi-values are serialized as list of dicts
+                elif pinfo.get("multi"):
+                    value = list(map(lambda x: x.to_dict(), value))
+
+                # squash all the hash values into one dict
+                elif pinfo.get("hash"):
+                    value = {k: v.to_dict() for k, v in value.items()}
+
+                # serialize single values
+                else:
+                    value = value.to_dict()
+
+            # serialize anything with to_dict method
+            elif hasattr(value, "to_dict"):
+                value = value.to_dict()
+
+            d[pname] = value
+        return {self.name: d}
+
+    def _clone(self) -> Self:
+        c = self.__class__()
+        for attr in self._params:
+            c._params[attr] = copy(self._params[attr])
+        return c
+
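+# Illustrative note, not part of the module: concrete DSL objects built on
+# DslBase (queries, aggregations, ...) serialize to ``{name: params}`` via
+# ``to_dict()``; for example, a ``match`` query object created elsewhere in the
+# package would be expected to render as ``{"match": {"title": "python"}}``.
+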
+
+if TYPE_CHECKING:
+    HitMetaBase = HitBaseType
+else:
+    HitMetaBase = AttrDict[Any]
+
+
+class HitMeta(HitMetaBase):
+    inner_hits: Mapping[str, Any]
+
+    def __init__(
+        self,
+        document: Dict[str, Any],
+        exclude: Tuple[str, ...] = ("_source", "_fields"),
+    ):
+        d = {
+            k[1:] if k.startswith("_") else k: v
+            for (k, v) in document.items()
+            if k not in exclude
+        }
+        if "type" in d:
+            # make sure we are consistent everywhere in python
+            d["doc_type"] = d.pop("type")
+        super().__init__(d)
+
+
+class ObjectBase(AttrDict[Any]):
+    _doc_type: "DocumentOptions"
+    _index: "IndexBase"
+    meta: HitMeta
+
+    def __init__(self, meta: Optional[Dict[str, Any]] = None, **kwargs: Any):
+        meta = meta or {}
+        for k in list(kwargs):
+            if k.startswith("_") and k[1:] in META_FIELDS:
+                meta[k] = kwargs.pop(k)
+
+        super(AttrDict, self).__setattr__("meta", HitMeta(meta))
+
+        # process field defaults
+        if hasattr(self, "_defaults"):
+            for name in self._defaults:
+                if name not in kwargs:
+                    value = self._defaults[name]
+                    if callable(value):
+                        value = value()
+                    kwargs[name] = value
+
+        super().__init__(kwargs)
+
+    @classmethod
+    def __list_fields(cls) -> Iterator[Tuple[str, "Field", bool]]:
+        """
+        Get all the fields defined for our class. If we have an Index, look at
+        the index mappings as well, marking the fields coming from the Index as
+        optional.
+        """
+        for name in cls._doc_type.mapping:
+            field = cls._doc_type.mapping[name]
+            yield name, field, False
+
+        if hasattr(cls.__class__, "_index"):
+            if not cls._index._mapping:
+                return
+            for name in cls._index._mapping:
+                # don't return fields that are in _doc_type
+                if name in cls._doc_type.mapping:
+                    continue
+                field = cls._index._mapping[name]
+                yield name, field, True
+
+    @classmethod
+    def __get_field(cls, name: str) -> Optional["Field"]:
+        try:
+            return cls._doc_type.mapping[name]
+        except KeyError:
+            # fallback to fields on the Index
+            if hasattr(cls, "_index") and cls._index._mapping:
+                try:
+                    return cls._index._mapping[name]
+                except KeyError:
+                    pass
+            return None
+
+    @classmethod
+    def from_es(cls, hit: Union[Dict[str, Any], "ObjectApiResponse[Any]"]) -> Self:
+        meta = hit.copy()
+        data = meta.pop("_source", {})
+        doc = cls(meta=meta)
+        doc._from_dict(data)
+        return doc
+
+    def _from_dict(self, data: Dict[str, Any]) -> None:
+        for k, v in data.items():
+            f = self.__get_field(k)
+            if f and f._coerce:
+                v = f.deserialize(v)
+            setattr(self, k, v)
+
+    def __getstate__(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:  # type: ignore[override]
+        return self.to_dict(), self.meta._d_
+
+    def __setstate__(self, state: Tuple[Dict[str, Any], Dict[str, Any]]) -> None:  # type: ignore[override]
+        data, meta = state
+        super(AttrDict, self).__setattr__("_d_", {})
+        super(AttrDict, self).__setattr__("meta", HitMeta(meta))
+        self._from_dict(data)
+
+    def __getattr__(self, name: str) -> Any:
+        try:
+            return super().__getattr__(name)
+        except AttributeError:
+            f = self.__get_field(name)
+            if f is not None and hasattr(f, "empty"):
+                value = f.empty()
+                if value not in SKIP_VALUES:
+                    setattr(self, name, value)
+                    value = getattr(self, name)
+                return value
+            raise
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        if name in self.__class__._doc_type.mapping:
+            self._d_[name] = value
+        else:
+            super().__setattr__(name, value)
+
+    def to_dict(self, skip_empty: bool = True) -> Dict[str, Any]:
+        out = {}
+        for k, v in self._d_.items():
+            # if this is a mapped field, let the field serialize the value
+            f = self.__get_field(k)
+            if f and f._coerce:
+                v = f.serialize(v, skip_empty=skip_empty)
+
+            # if someone assigned AttrList, unwrap it
+            if isinstance(v, AttrList):
+                v = v._l_
+
+            if skip_empty:
+                # don't serialize empty values
+                # careful not to include numeric zeros
+                if v in ([], {}, None):
+                    continue
+
+            out[k] = v
+        return out
+
+    def clean_fields(self, validate: bool = True) -> None:
+        errors: Dict[str, List[ValidationException]] = {}
+        for name, field, optional in self.__list_fields():
+            data = self._d_.get(name, None)
+            if data is None and optional:
+                continue
+            try:
+                # save the cleaned value
+                data = field.clean(data)
+            except ValidationException as e:
+                errors.setdefault(name, []).append(e)
+
+            if name in self._d_ or data not in ([], {}, None):
+                self._d_[name] = cast(Any, data)
+
+        if validate and errors:
+            raise ValidationException(errors)
+
+    def clean(self) -> None:
+        pass
+
+    def full_clean(self) -> None:
+        self.clean_fields(validate=False)
+        self.clean()
+        self.clean_fields(validate=True)
+
+
+def merge(
+    data: Union[Dict[str, Any], AttrDict[Any]],
+    new_data: Union[Dict[str, Any], AttrDict[Any]],
+    raise_on_conflict: bool = False,
+) -> None:
+    if not (
+        isinstance(data, (AttrDict, collections.abc.Mapping))
+        and isinstance(new_data, (AttrDict, collections.abc.Mapping))
+    ):
+        raise ValueError(
+            f"You can only merge two dicts! Got {data!r} and {new_data!r} instead."
+        )
+
+    for key, value in new_data.items():
+        if (
+            key in data
+            and isinstance(data[key], (AttrDict, collections.abc.Mapping))
+            and isinstance(value, (AttrDict, collections.abc.Mapping))
+        ):
+            merge(data[key], value, raise_on_conflict)  # type: ignore[arg-type]
+        elif key in data and data[key] != value and raise_on_conflict:
+            raise ValueError(f"Incompatible data for key {key!r}, cannot be merged.")
+        else:
+            data[key] = value
+
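+# A small sketch of ``merge`` behaviour (illustrative, not part of the module):
+# nested mappings are merged recursively, and conflicting scalar values either
+# raise or are overwritten depending on ``raise_on_conflict``.
+#
+#     data = {"a": {"x": 1}}
+#     merge(data, {"a": {"y": 2}, "b": 3})
+#     # data is now {"a": {"x": 1, "y": 2}, "b": 3}
+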
+
+def recursive_to_dict(data: Any) -> Any:
+    """Recursively transform objects that potentially have .to_dict()
+    into dictionary literals by traversing AttrList, AttrDict, list,
+    tuple, and Mapping types.
+    """
+    if isinstance(data, AttrList):
+        data = list(data._l_)
+    elif hasattr(data, "to_dict"):
+        data = data.to_dict()
+    if isinstance(data, (list, tuple)):
+        return type(data)(recursive_to_dict(inner) for inner in data)
+    elif isinstance(data, dict):
+        return {key: recursive_to_dict(val) for key, val in data.items()}
+    return data
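+
+
+# Illustrative example, not part of the module: ``recursive_to_dict`` unwraps
+# AttrDict/AttrList wrappers and anything exposing ``to_dict()`` into plain
+# Python containers.
+#
+#     recursive_to_dict(AttrDict({"a": AttrList([1, 2])}))
+#     # -> {"a": [1, 2]}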
diff -pruN 8.17.2-2/elasticsearch/dsl/wrappers.py 9.2.0-1/elasticsearch/dsl/wrappers.py
--- 8.17.2-2/elasticsearch/dsl/wrappers.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/dsl/wrappers.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,144 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import operator
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    Literal,
+    Mapping,
+    Optional,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+)
+
+if TYPE_CHECKING:
+    from _operator import _SupportsComparison
+
+from typing_extensions import TypeAlias
+
+from .utils import AttrDict
+
+ComparisonOperators: TypeAlias = Literal["lt", "lte", "gt", "gte"]
+RangeValT = TypeVar("RangeValT", bound="_SupportsComparison")
+
+__all__ = ["Range"]
+
+
+class Range(AttrDict[RangeValT]):
+    OPS: ClassVar[
+        Mapping[
+            ComparisonOperators,
+            Callable[["_SupportsComparison", "_SupportsComparison"], bool],
+        ]
+    ] = {
+        "lt": operator.lt,
+        "lte": operator.le,
+        "gt": operator.gt,
+        "gte": operator.ge,
+    }
+
+    def __init__(
+        self,
+        d: Optional[Dict[str, RangeValT]] = None,
+        /,
+        **kwargs: RangeValT,
+    ):
+        if d is not None and (kwargs or not isinstance(d, dict)):
+            raise ValueError(
+                "Range accepts a single dictionary or a set of keyword arguments."
+            )
+
+        if d is None:
+            data = kwargs
+        else:
+            data = d
+
+        for k in data:
+            if k not in self.OPS:
+                raise ValueError(f"Range received an unknown operator {k!r}")
+
+        if "gt" in data and "gte" in data:
+            raise ValueError("You cannot specify both gt and gte for Range.")
+
+        if "lt" in data and "lte" in data:
+            raise ValueError("You cannot specify both lt and lte for Range.")
+
+        super().__init__(data)
+
+    def __repr__(self) -> str:
+        return "Range(%s)" % ", ".join("%s=%r" % op for op in self._d_.items())
+
+    def __contains__(self, item: object) -> bool:
+        if isinstance(item, str):
+            return super().__contains__(item)
+
+        item_supports_comp = any(hasattr(item, f"__{op}__") for op in self.OPS)
+        if not item_supports_comp:
+            return False
+
+        for op in self.OPS:
+            if op in self._d_ and not self.OPS[op](
+                cast("_SupportsComparison", item), self._d_[op]
+            ):
+                return False
+        return True
+
+    @property
+    def upper(self) -> Union[Tuple[RangeValT, bool], Tuple[None, Literal[False]]]:
+        if "lt" in self._d_:
+            return self._d_["lt"], False
+        if "lte" in self._d_:
+            return self._d_["lte"], True
+        return None, False
+
+    @property
+    def lower(self) -> Union[Tuple[RangeValT, bool], Tuple[None, Literal[False]]]:
+        if "gt" in self._d_:
+            return self._d_["gt"], False
+        if "gte" in self._d_:
+            return self._d_["gte"], True
+        return None, False
+
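+# A brief usage sketch (illustrative, not part of the module): Range keeps the
+# comparison operators as keys and supports membership tests and bound lookups.
+#
+#     r = Range(gte=1, lt=5)
+#     3 in r   # True
+#     5 in r   # False
+#     r.lower  # (1, True): value and "inclusive" flag
+#     r.upper  # (5, False)
+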
+
+class AggregationRange(AttrDict[Any]):
+    """
+    :arg from: Start of the range (inclusive).
+    :arg key: Custom key to return the range with.
+    :arg to: End of the range (exclusive).
+    """
+
+    def __init__(
+        self,
+        *,
+        from_: Any = None,
+        key: Optional[str] = None,
+        to: Any = None,
+        **kwargs: Any,
+    ):
+        if from_ is not None:
+            kwargs["from_"] = from_
+        if key is not None:
+            kwargs["key"] = key
+        if to is not None:
+            kwargs["to"] = to
+        super().__init__(kwargs)
diff -pruN 8.17.2-2/elasticsearch/esql/__init__.py 9.2.0-1/elasticsearch/esql/__init__.py
--- 8.17.2-2/elasticsearch/esql/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/esql/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,19 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from ..dsl import E  # noqa: F401
+from .esql import ESQL, ESQLBase, and_, not_, or_  # noqa: F401
diff -pruN 8.17.2-2/elasticsearch/esql/esql.py 9.2.0-1/elasticsearch/esql/esql.py
--- 8.17.2-2/elasticsearch/esql/esql.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/esql/esql.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1446 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import json
+import re
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional, Tuple, Type, Union
+
+from ..dsl.document_base import DocumentBase, InstrumentedExpression, InstrumentedField
+
+FieldType = Union[InstrumentedField, str]
+IndexType = Union[Type[DocumentBase], str]
+ExpressionType = Any
+
+
+class ESQL(ABC):
+    """The static methods of the ``ESQL`` class provide access to the ES|QL source
+    commands, used to create ES|QL queries.
+
+    These methods return an instance of class ``ESQLBase``, which provides access to
+    the ES|QL processing commands.
+    """
+
+    @staticmethod
+    def from_(*indices: IndexType) -> "From":
+        """The ``FROM`` source command returns a table with data from a data stream, index, or alias.
+
+        :param indices: A list of indices, data streams or aliases. Supports wildcards and date math.
+
+        Examples::
+
+            query1 = ESQL.from_("employees")
+            query2 = ESQL.from_("<logs-{now/d}>")
+            query3 = ESQL.from_("employees-00001", "other-employees-*")
+            query4 = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*")
+            query5 = ESQL.from_("employees").metadata("_id")
+        """
+        return From(*indices)
+
+    @staticmethod
+    def row(**params: ExpressionType) -> "Row":
+        """The ``ROW`` source command produces a row with one or more columns with values that you specify.
+        This can be useful for testing.
+
+        :param params: the column values to produce, given as keyword arguments.
+
+        Examples::
+
+            query1 = ESQL.row(a=1, b="two", c=None)
+            query2 = ESQL.row(a=[1, 2])
+            query3 = ESQL.row(a=functions.round(1.23, 0))
+        """
+        return Row(**params)
+
+    @staticmethod
+    def show(item: str) -> "Show":
+        """The ``SHOW`` source command returns information about the deployment and its capabilities.
+
+        :param item: Can only be ``INFO``.
+
+        Examples::
+
+            query = ESQL.show("INFO")
+        """
+        return Show(item)
+
+    @staticmethod
+    def ts(*indices: IndexType) -> "TS":
+        """The ``TS`` source command is similar to ``FROM``, but for time series indices.
+
+        :param indices: A list of indices, data streams or aliases. Supports wildcards and date math.
+
+        Examples::
+
+            query = (
+                ESQL.ts("metrics")
+                .where("@timestamp >= now() - 1 day")
+                .stats("SUM(AVG_OVER_TIME(memory_usage)").by("host", "TBUCKET(1 hour)")
+            )
+        """
+        return TS(*indices)
+
+    @staticmethod
+    def branch() -> "Branch":
+        """This method can only be used inside a ``FORK`` command to create each branch.
+
+        Examples::
+
+            query = ESQL.from_("employees").fork(
+                ESQL.branch().where("emp_no == 10001"),
+                ESQL.branch().where("emp_no == 10002"),
+            )
+        """
+        return Branch()
+
+
+class ESQLBase(ABC):
+    """The methods of the ``ESQLBase`` class provide access to the ES|QL processing
+    commands, used to build ES|QL queries.
+    """
+
+    def __init__(self, parent: Optional["ESQLBase"] = None):
+        self._parent = parent
+
+    def __repr__(self) -> str:
+        return self.render()
+
+    def render(self) -> str:
+        return (
+            self._parent.render() + "\n| " if self._parent else ""
+        ) + self._render_internal()
+
+    @abstractmethod
+    def _render_internal(self) -> str:
+        pass
+
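+    # Rendering note (illustrative, not from the source): each chained command
+    # renders on its own line prefixed with "| ", so a query such as
+    # ESQL.from_("employees").where("emp_no == 10001") would render roughly as:
+    #
+    #     FROM employees
+    #     | WHERE emp_no == 10001
+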
+    @staticmethod
+    def _format_index(index: IndexType) -> str:
+        return index._index._name if hasattr(index, "_index") else str(index)
+
+    @staticmethod
+    def _format_id(id: FieldType, allow_patterns: bool = False) -> str:
+        s = str(id)  # in case it is an InstrumentedField
+        if allow_patterns and "*" in s:
+            return s  # patterns cannot be escaped
+        if re.fullmatch(r"[a-zA-Z_@][a-zA-Z0-9_\.]*", s):
+            return s
+        # this identifier needs to be escaped
+        s = s.replace("`", "``")
+        return f"`{s}`"
+
+    @staticmethod
+    def _format_expr(expr: ExpressionType) -> str:
+        return (
+            json.dumps(expr)
+            if not isinstance(expr, (str, InstrumentedExpression))
+            else str(expr)
+        )
+
+    def _is_forked(self) -> bool:
+        if self.__class__.__name__ == "Fork":
+            return True
+        if self._parent:
+            return self._parent._is_forked()
+        return False
+
+    def change_point(self, value: FieldType) -> "ChangePoint":
+        """``CHANGE_POINT`` detects spikes, dips, and change points in a metric.
+
+        :param value: The column with the metric in which you want to detect a change point.
+
+        Examples::
+
+            query = (
+                ESQL.row(key=list(range(1, 26)))
+                .mv_expand("key")
+                .eval(value=functions.case("key<13", 0, 42))
+                .change_point("value")
+                .on("key")
+                .where("type IS NOT NULL")
+            )
+        """
+        return ChangePoint(self, value)
+
+    def completion(
+        self, *prompt: ExpressionType, **named_prompt: ExpressionType
+    ) -> "Completion":
+        """The ``COMPLETION`` command allows you to send prompts and context to a Large
+        Language Model (LLM) directly within your ES|QL queries, to perform text
+        generation tasks.
+
+        :param prompt: The input text or expression used to prompt the LLM. This can
+                       be a string literal or a reference to a column containing text.
+        :param named_prompt: The input text or expression, given as a keyword argument.
+                             The argument name is used for the column name. If the
+                             prompt is given as a positional argument, the results will
+                             be stored in a column named ``completion``. If the
+                             specified column already exists, it will be overwritten
+                             with the new results.
+
+        Examples::
+
+            query1 = (
+                ESQL.row(question="What is Elasticsearch?")
+                .completion("question").with_("test_completion_model")
+                .keep("question", "completion")
+            )
+            query2 = (
+                ESQL.row(question="What is Elasticsearch?")
+                .completion(answer="question").with_("test_completion_model")
+                .keep("question", "answer")
+            )
+            query3 = (
+                ESQL.from_("movies")
+                .sort("rating DESC")
+                .limit(10)
+                .eval(prompt=\"\"\"CONCAT(
+                    "Summarize this movie using the following information: \\n",
+                    "Title: ", title, "\\n",
+                    "Synopsis: ", synopsis, "\\n",
+                    "Actors: ", MV_CONCAT(actors, ", "), "\\n",
+                )\"\"\")
+                .completion(summary="prompt").with_("test_completion_model")
+                .keep("title", "summary", "rating")
+            )
+        """
+        return Completion(self, *prompt, **named_prompt)
+
+    def dissect(self, input: FieldType, pattern: str) -> "Dissect":
+        """``DISSECT`` enables you to extract structured data out of a string.
+
+        :param input: The column that contains the string you want to structure. If
+                      the column has multiple values, ``DISSECT`` will process each value.
+        :param pattern: A dissect pattern. If a field name conflicts with an existing
+                        column, the existing column is dropped. If a field name is used
+                        more than once, only the rightmost duplicate creates a column.
+
+        Examples::
+
+            query = (
+                ESQL.row(a="2023-01-23T12:15:00.000Z - some text - 127.0.0.1")
+                .dissect("a", "%{date} - %{msg} - %{ip}")
+                .keep("date", "msg", "ip")
+                .eval(date="TO_DATETIME(date)")
+            )
+        """
+        return Dissect(self, input, pattern)
+
+    def drop(self, *columns: FieldType) -> "Drop":
+        """The ``DROP`` processing command removes one or more columns.
+
+        :param columns: The columns to drop, given as positional arguments. Supports wildcards.
+
+        Examples::
+
+            query1 = ESQL.from_("employees").drop("height")
+            query2 = ESQL.from_("employees").drop("height*")
+        """
+        return Drop(self, *columns)
+
+    def enrich(self, policy: str) -> "Enrich":
+        """``ENRICH`` enables you to add data from existing indices as new columns using an
+        enrich policy.
+
+        :param policy: The name of the enrich policy. You need to create and execute the
+                       enrich policy first.
+
+        Examples::
+
+            query1 = (
+                ESQL.row(a="1")
+                .enrich("languages_policy").on("a").with_("language_name")
+            )
+            query2 = (
+                ESQL.row(a="1")
+                .enrich("languages_policy").on("a").with_(name="language_name")
+            )
+        """
+        return Enrich(self, policy)
+
+    def eval(self, *columns: ExpressionType, **named_columns: ExpressionType) -> "Eval":
+        """The ``EVAL`` processing command enables you to append new columns with calculated values.
+
+        :param columns: The values for the columns, given as positional arguments. Can be literals,
+                        expressions, or functions. Can use columns defined left of this one.
+        :param named_columns: The values for the new columns, given as keyword arguments. The name
+                              of the arguments is used as column name. If a column with the same
+                              name already exists, the existing column is dropped. If a column name
+                              is used more than once, only the rightmost duplicate creates a column.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .sort("emp_no")
+                .keep("first_name", "last_name", "height")
+                .eval(height_feet="height * 3.281", height_cm="height * 100")
+            )
+            query2 = (
+                ESQL.from_("employees")
+                .eval("height * 3.281")
+                .stats(avg_height_feet=functions.avg("`height * 3.281`"))
+            )
+        """
+        return Eval(self, *columns, **named_columns)
+
+    def fork(
+        self,
+        fork1: "Branch",
+        fork2: Optional["Branch"] = None,
+        fork3: Optional["Branch"] = None,
+        fork4: Optional["Branch"] = None,
+        fork5: Optional["Branch"] = None,
+        fork6: Optional["Branch"] = None,
+        fork7: Optional["Branch"] = None,
+        fork8: Optional["Branch"] = None,
+    ) -> "Fork":
+        """The ``FORK`` processing command creates multiple execution branches to operate on the
+        same input data and combines the results in a single output table.
+
+        :param fork<n>: Up to 8 execution branches, created with the ``ESQL.branch()`` method.
+
+        Examples::
+
+            query = (
+                ESQL.from_("employees")
+                .fork(
+                    ESQL.branch().where("emp_no == 10001"),
+                    ESQL.branch().where("emp_no == 10002"),
+                )
+                .keep("emp_no", "_fork")
+                .sort("emp_no")
+            )
+        """
+        if self._is_forked():
+            raise ValueError("a query can only have one fork")
+        return Fork(self, fork1, fork2, fork3, fork4, fork5, fork6, fork7, fork8)
+
+    def fuse(self, method: Optional[str] = None) -> "Fuse":
+        """The ``FUSE`` processing command merges rows from multiple result sets and assigns
+        new relevance scores.
+
+        :param method: Defaults to ``RRF``. Can be one of ``RRF`` (for Reciprocal Rank Fusion)
+                       or ``LINEAR`` (for linear combination of scores). Designates which
+                       method to use to assign new relevance scores.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("books").metadata("_id", "_index", "_score")
+                .fork(
+                    ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+                    ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+                )
+                .fuse()
+            )
+            query2 = (
+                ESQL.from_("books").metadata("_id", "_index", "_score")
+                .fork(
+                    ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+                    ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+                )
+                .fuse("linear")
+            )
+            query3 = (
+                ESQL.from_("books").metadata("_id", "_index", "_score")
+                .fork(
+                    ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+                    ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+                )
+                .fuse("linear").by("title", "description")
+            )
+            query4 = (
+                ESQL.from_("books").metadata("_id", "_index", "_score")
+                .fork(
+                    ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+                    ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+                )
+                .fuse("linear").with_(normalizer="minmax")
+            )
+        """
+        return Fuse(self, method)
+
+    def grok(self, input: FieldType, pattern: str) -> "Grok":
+        """``GROK`` enables you to extract structured data out of a string.
+
+        :param input: The column that contains the string you want to structure. If the
+                      column has multiple values, ``GROK`` will process each value.
+        :param pattern: A grok pattern. If a field name conflicts with an existing column,
+                        the existing column is discarded. If a field name is used more than
+                        once, a multi-valued column will be created with one value for each
+                        occurrence of the field name.
+
+        Examples::
+
+            query1 = (
+                ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42")
+                .grok("a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}")
+                .keep("date", "ip", "email", "num")
+            )
+            query2 = (
+                ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42")
+                .grok(
+                    "a",
+                    "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}",
+                )
+                .keep("date", "ip", "email", "num")
+                .eval(date=functions.to_datetime("date"))
+            )
+            query3 = (
+                ESQL.from_("addresses")
+                .keep("city.name", "zip_code")
+                .grok("zip_code", "%{WORD:zip_parts} %{WORD:zip_parts}")
+            )
+        """
+        return Grok(self, input, pattern)
+
+    def inline_stats(
+        self, *expressions: ExpressionType, **named_expressions: ExpressionType
+    ) -> "Stats":
+        """The ``INLINE STATS`` processing command groups rows according to a common value
+        and calculates one or more aggregated values over the grouped rows.
+
+        The command is identical to ``STATS`` except that it preserves all the columns from
+        the input table.
+
+        :param expressions: A list of expressions, given as positional arguments.
+        :param named_expressions: A list of expressions, given as keyword arguments. The
+                                  argument names are used for the returned aggregated values.
+
+        Note that ``expressions`` and ``named_expressions`` cannot be combined in a single call.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .keep("emp_no", "languages", "salary")
+                .inline_stats(max_salary=functions.max(E("salary"))).by("languages")
+            )
+            query2 = (
+                ESQL.from_("employees")
+                .keep("emp_no", "languages", "salary")
+                .inline_stats(max_salary=functions.max(E("salary")))
+            )
+            query3 = (
+                ESQL.from_("employees")
+                .where("still_hired")
+                .keep("emp_no", "languages", "salary", "hire_date")
+                .eval(tenure=functions.date_diff("year", E("hire_date"), "2025-09-18T00:00:00"))
+                .drop("hire_date")
+                .inline_stats(
+                    avg_salary=functions.avg(E("salary")),
+                    count=functions.count(E("*")),
+                )
+                .by("languages", "tenure")
+            )
+            query4 = (
+                ESQL.from_("employees")
+                .keep("emp_no", "salary")
+                .inline_stats(
+                    avg_lt_50=functions.round(functions.avg(E("salary"))).where(E("salary") < 50000),
+                    avg_lt_60=functions.round(functions.avg(E("salary"))).where(E("salary") >= 50000, E("salary") < 60000),
+                    avg_gt_60=functions.round(functions.avg(E("salary"))).where(E("salary") >= 60000),
+                )
+            )
+        """
+        return InlineStats(self, *expressions, **named_expressions)
+
+    def keep(self, *columns: FieldType) -> "Keep":
+        """The ``KEEP`` processing command enables you to specify what columns are returned
+        and the order in which they are returned.
+
+        :param columns: The columns to keep, given as positional arguments. Supports
+                        wildcards.
+
+        Examples::
+
+            query1 = ESQL.from_("employees").keep("emp_no", "first_name", "last_name", "height")
+            query2 = ESQL.from_("employees").keep("h*")
+            query3 = ESQL.from_("employees").keep("h*", "*")
+        """
+        return Keep(self, *columns)
+
+    def limit(self, max_number_of_rows: int) -> "Limit":
+        """The ``LIMIT`` processing command enables you to limit the number of rows that are
+        returned.
+
+        :param max_number_of_rows: The maximum number of rows to return.
+
+        Examples::
+
+            query1 = ESQL.from_("employees").sort("emp_no ASC").limit(5)
+            query2 = ESQL.from_("index").stats(functions.avg("field1")).by("field2").limit(20000)
+        """
+        return Limit(self, max_number_of_rows)
+
+    def lookup_join(self, lookup_index: IndexType) -> "LookupJoin":
+        """``LOOKUP JOIN`` enables you to add data from another index, AKA a 'lookup' index,
+        to your ES|QL query results, simplifying data enrichment and analysis workflows.
+
+        :param lookup_index: The name of the lookup index. This must be a specific index
+                             name - wildcards, aliases, and remote cluster references are
+                             not supported. Indices used for lookups must be configured
+                             with the lookup index mode.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("firewall_logs")
+                .lookup_join("threat_list").on("source.IP")
+                .where("threat_level IS NOT NULL")
+            )
+            query2 = (
+                ESQL.from_("system_metrics")
+                .lookup_join("host_inventory").on("host.name")
+                .lookup_join("ownerships").on("host.name")
+            )
+            query3 = (
+                ESQL.from_("app_logs")
+                .lookup_join("service_owners").on("service_id")
+            )
+            query4 = (
+                ESQL.from_("employees")
+                .eval(language_code="languages")
+                .where("emp_no >= 10091 AND emp_no < 10094")
+                .lookup_join("languages_lookup").on("language_code")
+            )
+        """
+        return LookupJoin(self, lookup_index)
+
+    def mv_expand(self, column: FieldType) -> "MvExpand":
+        """The ``MV_EXPAND`` processing command expands multivalued columns into one row per
+        value, duplicating other columns.
+
+        :param column: The multivalued column to expand.
+
+        Examples::
+
+            query = ESQL.row(a=[1, 2, 3], b="b", j=["a", "b"]).mv_expand("a")
+        """
+        return MvExpand(self, column)
+
+    def rename(self, **columns: FieldType) -> "Rename":
+        """The ``RENAME`` processing command renames one or more columns.
+
+        :param columns: The old and new column name pairs, given as keyword arguments.
+                        If a name conflicts with an existing column name, the existing column
+                        is dropped. If multiple columns are renamed to the same name, all but
+                        the rightmost column with the same new name are dropped.
+
+        Examples::
+
+            query = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "still_hired")
+                .rename(still_hired="employed")
+            )
+        """
+        return Rename(self, **columns)
+
+    def rerank(self, *query: ExpressionType, **named_query: ExpressionType) -> "Rerank":
+        """The ``RERANK`` command uses an inference model to compute a new relevance score
+        for an initial set of documents, directly within your ES|QL queries.
+
+        :param query: The query text used to rerank the documents. This is typically the
+                      same query used in the initial search.
+        :param named_query: The query text used to rerank the documents, given as a
+                            keyword argument. The argument name is used for the column
+                            name. If the query is given as a positional argument, the
+                            results will be stored in a column named ``_score``. If the
+                            specified column already exists, it will be overwritten with
+                            the new results.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("books").metadata("_score")
+                .where('MATCH(description, "hobbit")')
+                .sort("_score DESC")
+                .limit(100)
+                .rerank("hobbit").on("description").with_(inference_id="test_reranker")
+                .limit(3)
+                .keep("title", "_score")
+            )
+            query2 = (
+                ESQL.from_("books").metadata("_score")
+                .where('MATCH(description, "hobbit") OR MATCH(author, "Tolkien")')
+                .sort("_score DESC")
+                .limit(100)
+                .rerank(rerank_score="hobbit").on("description", "author").with_(inference_id="test_reranker")
+                .sort("rerank_score")
+                .limit(3)
+                .keep("title", "_score", "rerank_score")
+            )
+            query3 = (
+                ESQL.from_("books").metadata("_score")
+                .where('MATCH(description, "hobbit") OR MATCH(author, "Tolkien")')
+                .sort("_score DESC")
+                .limit(100)
+                .rerank(rerank_score="hobbit").on("description", "author").with_(inference_id="test_reranker")
+                .eval(original_score="_score", _score="rerank_score + original_score")
+                .sort("_score")
+                .limit(3)
+                .keep("title", "original_score", "rerank_score", "_score")
+            )
+        """
+        return Rerank(self, *query, **named_query)
+
+    def sample(self, probability: float) -> "Sample":
+        """The ``SAMPLE`` command samples a fraction of the table rows.
+
+        :param probability: The probability that a row is included in the sample. The value
+                            must be between 0 and 1, exclusive.
+
+        Examples::
+
+            query = ESQL.from_("employees").keep("emp_no").sample(0.05)
+        """
+        return Sample(self, probability)
+
+    def sort(self, *columns: ExpressionType) -> "Sort":
+        """The ``SORT`` processing command sorts a table on one or more columns.
+
+        :param columns: The columns to sort on.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("height")
+            )
+            query2 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("height DESC")
+            )
+            query3 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("height DESC", "first_name ASC")
+            )
+            query4 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("first_name ASC NULLS FIRST")
+            )
+        """
+        return Sort(self, *columns)
+
+    def stats(
+        self, *expressions: ExpressionType, **named_expressions: ExpressionType
+    ) -> "Stats":
+        """The ``STATS`` processing command groups rows according to a common value and
+        calculates one or more aggregated values over the grouped rows.
+
+        :param expressions: A list of expressions, given as positional arguments.
+        :param named_expressions: A list of expressions, given as keyword arguments. The
+                                  argument names are used for the returned aggregated values.
+
+        Note that ``expressions`` and ``named_expressions`` cannot be combined in a single call.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .stats(count=functions.count("emp_no")).by("languages")
+                .sort("languages")
+            )
+            query2 = (
+                ESQL.from_("employees")
+                .stats(avg_lang=functions.avg("languages"))
+            )
+            query3 = (
+                ESQL.from_("employees")
+                .stats(
+                    avg_lang=functions.avg("languages"),
+                    max_lang=functions.max("languages")
+                )
+            )
+            query4 = (
+                ESQL.from_("employees")
+                .stats(
+                    avg50s=functions.avg("salary").where('birth_date < "1960-01-01"'),
+                    avg60s=functions.avg("salary").where('birth_date >= "1960-01-01"'),
+                ).by("gender")
+                .sort("gender")
+            )
+            query5 = (
+                ESQL.from_("employees")
+                .eval(Ks="salary / 1000")
+                .stats(
+                    under_40K=functions.count("*").where("Ks < 40"),
+                    inbetween=functions.count("*").where("40 <= Ks AND Ks < 60"),
+                    over_60K=functions.count("*").where("60 <= Ks"),
+                    total=functions.count("*")
+                )
+            )
+            query6 = (
+                ESQL.row(i=1, a=["a", "b"])
+                .stats(functions.min("i")).by("a")
+                .sort("a ASC")
+            )
+            query7 = (
+                ESQL.from_("employees")
+                .eval(hired=functions.date_format("hire_date", "yyyy"))
+                .stats(avg_salary=functions.avg("salary")).by("hired", "languages.long")
+                .eval(avg_salary=functions.round("avg_salary"))
+                .sort("hired", "languages.long")
+            )
+        """
+        return Stats(self, *expressions, **named_expressions)
+
+    def where(self, *expressions: ExpressionType) -> "Where":
+        """The ``WHERE`` processing command produces a table that contains all the rows
+        from the input table for which the provided condition evaluates to ``true``.
+
+        :param expressions: A list of boolean expressions, given as positional arguments.
+                            These expressions are combined with an ``AND`` logical operator.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "still_hired")
+                .where("still_hired == true")
+            )
+            query2 = (
+                ESQL.from_("sample_data")
+                .where("@timestamp > NOW() - 1 hour")
+            )
+            query3 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .where("LENGTH(first_name) < 4")
+            )
+        """
+        return Where(self, *expressions)
+
+
+class From(ESQLBase):
+    """Implementation of the ``FROM`` source command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    command_name = "FROM"
+
+    def __init__(self, *indices: IndexType):
+        super().__init__()
+        self._indices = indices
+        self._metadata_fields: Tuple[FieldType, ...] = tuple()
+
+    def metadata(self, *fields: FieldType) -> "From":
+        """Continuation of the ``FROM`` and ``TS`` source commands.
+
+        :param fields: metadata fields to retrieve, given as positional arguments.
+        """
+        self._metadata_fields = fields
+        return self
+
+    def _render_internal(self) -> str:
+        indices = [self._format_index(index) for index in self._indices]
+        s = f'{self.command_name} {", ".join(indices)}'
+        if self._metadata_fields:
+            s = (
+                s
+                + f' METADATA {", ".join([self._format_id(field) for field in self._metadata_fields])}'
+            )
+        return s
+
+
+class Row(ESQLBase):
+    """Implementation of the ``ROW`` source command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, **params: ExpressionType):
+        super().__init__()
+        self._params = {
+            self._format_id(k): (
+                json.dumps(v)
+                if not isinstance(v, InstrumentedExpression)
+                else self._format_expr(v)
+            )
+            for k, v in params.items()
+        }
+
+    def _render_internal(self) -> str:
+        return "ROW " + ", ".join([f"{k} = {v}" for k, v in self._params.items()])
+
+
+class Show(ESQLBase):
+    """Implementation of the ``SHOW`` source command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    which makes it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, item: str):
+        super().__init__()
+        self._item = item
+
+    def _render_internal(self) -> str:
+        return f"SHOW {self._format_id(self._item)}"
+
+
+class TS(From):
+    """Implementation of the ``TS`` source command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    command_name = "TS"
+
+
+class Branch(ESQLBase):
+    """Implementation of a branch inside a ``FORK`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    which makes it possible to chain all the commands that belong to the branch
+    in a single expression.
+    """
+
+    def _render_internal(self) -> str:
+        return ""
+
+
+class ChangePoint(ESQLBase):
+    """Implementation of the ``CHANGE POINT`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, value: FieldType):
+        super().__init__(parent)
+        self._value = value
+        self._key: Optional[FieldType] = None
+        self._type_name: Optional[str] = None
+        self._pvalue_name: Optional[str] = None
+
+    def on(self, key: FieldType) -> "ChangePoint":
+        """Continuation of the ``CHANGE_POINT`` command.
+
+        :param key: The column with the key to order the values by. If not specified,
+                    ``@timestamp`` is used.
+        """
+        self._key = key
+        return self
+
+    def as_(self, type_name: str, pvalue_name: str) -> "ChangePoint":
+        """Continuation of the ``CHANGE_POINT`` command.
+
+        :param type_name: The name of the output column with the change point type.
+                          If not specified, ``type`` is used.
+        :param pvalue_name: The name of the output column with the p-value that indicates
+                            how extreme the change point is. If not specified, ``pvalue``
+                            is used.
+        """
+        self._type_name = type_name
+        self._pvalue_name = pvalue_name
+        return self
+
+    def _render_internal(self) -> str:
+        key = "" if not self._key else f" ON {self._format_id(self._key)}"
+        names = (
+            ""
+            if not self._type_name and not self._pvalue_name
+            else f' AS {self._format_id(self._type_name or "type")}, {self._format_id(self._pvalue_name or "pvalue")}'
+        )
+        return f"CHANGE_POINT {self._value}{key}{names}"
+
+
+class Completion(ESQLBase):
+    """Implementation of the ``COMPLETION`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(
+        self, parent: ESQLBase, *prompt: ExpressionType, **named_prompt: ExpressionType
+    ):
+        if len(prompt) + len(named_prompt) > 1:
+            raise ValueError(
+                "this method requires either one positional or one keyword argument only"
+            )
+        super().__init__(parent)
+        self._prompt = prompt
+        self._named_prompt = named_prompt
+        self._inference_id: Optional[str] = None
+
+    def with_(self, inference_id: str) -> "Completion":
+        """Continuation of the ``COMPLETION`` command.
+
+        :param inference_id: The ID of the inference endpoint to use for the task. The
+                             inference endpoint must be configured with the ``completion``
+                             task type.
+        """
+        self._inference_id = inference_id
+        return self
+
+    def _render_internal(self) -> str:
+        if self._inference_id is None:
+            raise ValueError("The completion command requires an inference ID")
+        with_ = {"inference_id": self._inference_id}
+        if self._named_prompt:
+            column = list(self._named_prompt.keys())[0]
+            prompt = list(self._named_prompt.values())[0]
+            return f"COMPLETION {self._format_id(column)} = {self._format_id(prompt)} WITH {json.dumps(with_)}"
+        else:
+            return f"COMPLETION {self._format_id(self._prompt[0])} WITH {json.dumps(with_)}"
+
+
+class Dissect(ESQLBase):
+    """Implementation of the ``DISSECT`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, input: FieldType, pattern: str):
+        super().__init__(parent)
+        self._input = input
+        self._pattern = pattern
+        self._separator: Optional[str] = None
+
+    def append_separator(self, separator: str) -> "Dissect":
+        """Continuation of the ``DISSECT`` command.
+
+        :param separator: A string used as the separator between appended values,
+                          when using the append modifier.
+        """
+        self._separator = separator
+        return self
+
+    def _render_internal(self) -> str:
+        sep = (
+            ""
+            if self._separator is None
+            else f" APPEND_SEPARATOR={json.dumps(self._separator)}"
+        )
+        return (
+            f"DISSECT {self._format_id(self._input)} {json.dumps(self._pattern)}{sep}"
+        )
+
+
+class Drop(ESQLBase):
+    """Implementation of the ``DROP`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, *columns: FieldType):
+        super().__init__(parent)
+        self._columns = columns
+
+    def _render_internal(self) -> str:
+        return f'DROP {", ".join([self._format_id(col, allow_patterns=True) for col in self._columns])}'
+
+
+class Enrich(ESQLBase):
+    """Implementation of the ``ENRICH`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, policy: str):
+        super().__init__(parent)
+        self._policy = policy
+        self._match_field: Optional[FieldType] = None
+        self._fields: Optional[Tuple[FieldType, ...]] = None
+        self._named_fields: Optional[Dict[str, FieldType]] = None
+
+    def on(self, match_field: FieldType) -> "Enrich":
+        """Continuation of the ``ENRICH`` command.
+
+        :param match_field: The match field. ``ENRICH`` uses its value to look for records
+                            in the enrich index. If not specified, the match will be
+                            performed on the column with the same name as the
+                            ``match_field`` defined in the enrich policy.
+        """
+        self._match_field = match_field
+        return self
+
+    def with_(self, *fields: FieldType, **named_fields: FieldType) -> "Enrich":
+        """Continuation of the ``ENRICH`` command.
+
+        :param fields: The enrich fields from the enrich index that are added to the result
+                       as new columns, given as positional arguments. If a column with the
+                       same name as the enrich field already exists, the existing column will
+                       be replaced by the new column. If not specified, each of the enrich
+                       fields defined in the policy is added. A column with the same name as
+                       the enrich field will be dropped unless the enrich field is renamed.
+        :param named_fields: The enrich fields from the enrich index that are added to the
+                             result as new columns, given as keyword arguments. The names of
+                             the keyword arguments are used as column names. If a column already
+                             exists with the same name as a new name, the existing column is
+                             discarded. If a name (new or original) occurs more than once, only
+                             the rightmost duplicate creates a new column.
+        """
+        if fields and named_fields:
+            raise ValueError(
+                "this method supports positional or keyword arguments but not both"
+            )
+        self._fields = fields
+        self._named_fields = named_fields
+        return self
+
+    def _render_internal(self) -> str:
+        on = (
+            ""
+            if self._match_field is None
+            else f" ON {self._format_id(self._match_field)}"
+        )
+        with_ = ""
+        if self._named_fields:
+            with_ = f' WITH {", ".join([f"{self._format_id(name)} = {self._format_id(field)}" for name, field in self._named_fields.items()])}'
+        elif self._fields is not None:
+            with_ = (
+                f' WITH {", ".join([self._format_id(field) for field in self._fields])}'
+            )
+        return f"ENRICH {self._policy}{on}{with_}"
+
+
+class Eval(ESQLBase):
+    """Implementation of the ``EVAL`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(
+        self,
+        parent: ESQLBase,
+        *columns: ExpressionType,
+        **named_columns: ExpressionType,
+    ):
+        if columns and named_columns:
+            raise ValueError(
+                "this method supports positional or keyword arguments but not both"
+            )
+        super().__init__(parent)
+        self._columns = columns or named_columns
+
+    def _render_internal(self) -> str:
+        if isinstance(self._columns, dict):
+            cols = ", ".join(
+                [
+                    f"{self._format_id(name)} = {self._format_expr(value)}"
+                    for name, value in self._columns.items()
+                ]
+            )
+        else:
+            cols = ", ".join([f"{self._format_expr(col)}" for col in self._columns])
+        return f"EVAL {cols}"
+
+
+class Fork(ESQLBase):
+    """Implementation of the ``FORK`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(
+        self,
+        parent: ESQLBase,
+        fork1: "Branch",
+        fork2: Optional["Branch"] = None,
+        fork3: Optional["Branch"] = None,
+        fork4: Optional["Branch"] = None,
+        fork5: Optional["Branch"] = None,
+        fork6: Optional["Branch"] = None,
+        fork7: Optional["Branch"] = None,
+        fork8: Optional["Branch"] = None,
+    ):
+        super().__init__(parent)
+        self._branches = [fork1, fork2, fork3, fork4, fork5, fork6, fork7, fork8]
+
+    def _render_internal(self) -> str:
+        cmds = ""
+        for branch in self._branches:
+            if branch:
+                cmd = branch.render()[3:].replace("\n", " ")
+                if cmds == "":
+                    cmds = f"( {cmd} )"
+                else:
+                    cmds += f"\n       ( {cmd} )"
+        return f"FORK {cmds}"
+
+
+class Fuse(ESQLBase):
+    """Implementation of the ``FUSE`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, method: Optional[str] = None):
+        super().__init__(parent)
+        self.method = method
+        self.by_columns: List[FieldType] = []
+        self.options: Dict[str, Any] = {}
+
+    def by(self, *columns: FieldType) -> "Fuse":
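+        """Continuation of the ``FUSE`` command.
+
+        :param columns: The columns used to identify which rows from the fused result
+                        sets should be merged together, given as positional arguments.
+        """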
+        self.by_columns += list(columns)
+        return self
+
+    def with_(self, **options: Any) -> "Fuse":
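+        """Continuation of the ``FUSE`` command.
+
+        :param options: Additional options for the score fusion, such as ``normalizer``,
+                        given as keyword arguments.
+        """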
+        self.options = options
+        return self
+
+    def _render_internal(self) -> str:
+        method = f" {self.method.upper()}" if self.method else ""
+        by = (
+            " " + " ".join([f"BY {column}" for column in self.by_columns])
+            if self.by_columns
+            else ""
+        )
+        with_ = " WITH " + json.dumps(self.options) if self.options else ""
+        return f"FUSE{method}{by}{with_}"
+
+
+class Grok(ESQLBase):
+    """Implementation of the ``GROK`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, input: FieldType, pattern: str):
+        super().__init__(parent)
+        self._input = input
+        self._pattern = pattern
+
+    def _render_internal(self) -> str:
+        return f"GROK {self._format_id(self._input)} {json.dumps(self._pattern)}"
+
+
+class Keep(ESQLBase):
+    """Implementation of the ``KEEP`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, *columns: FieldType):
+        super().__init__(parent)
+        self._columns = columns
+
+    def _render_internal(self) -> str:
+        return f'KEEP {", ".join([f"{self._format_id(col, allow_patterns=True)}" for col in self._columns])}'
+
+
+class Limit(ESQLBase):
+    """Implementation of the ``LIMIT`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, max_number_of_rows: int):
+        super().__init__(parent)
+        self._max_number_of_rows = max_number_of_rows
+
+    def _render_internal(self) -> str:
+        return f"LIMIT {json.dumps(self._max_number_of_rows)}"
+
+
+class LookupJoin(ESQLBase):
+    """Implementation of the ``LOOKUP JOIN`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, lookup_index: IndexType):
+        super().__init__(parent)
+        self._lookup_index = lookup_index
+        self._field: Optional[FieldType] = None
+
+    def on(self, field: FieldType) -> "LookupJoin":
+        """Continuation of the ``LOOKUP JOIN`` command.
+
+        :param field: The field to join on. This field must exist in both your current query
+                      results and in the lookup index. If the field contains multi-valued
+                      entries, those entries will not match anything (the added fields will
+                      contain null for those rows).
+        """
+        self._field = field
+        return self
+
+    def _render_internal(self) -> str:
+        if self._field is None:
+            raise ValueError("Joins require a field to join on.")
+        index = (
+            self._lookup_index
+            if isinstance(self._lookup_index, str)
+            else self._lookup_index._index._name
+        )
+        return (
+            f"LOOKUP JOIN {self._format_index(index)} ON {self._format_id(self._field)}"
+        )
+
+
+class MvExpand(ESQLBase):
+    """Implementation of the ``MV_EXPAND`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, column: FieldType):
+        super().__init__(parent)
+        self._column = column
+
+    def _render_internal(self) -> str:
+        return f"MV_EXPAND {self._format_id(self._column)}"
+
+
+class Rename(ESQLBase):
+    """Implementation of the ``RENAME`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, **columns: FieldType):
+        super().__init__(parent)
+        self._columns = columns
+
+    def _render_internal(self) -> str:
+        return f'RENAME {", ".join([f"{self._format_id(old_name)} AS {self._format_id(new_name)}" for old_name, new_name in self._columns.items()])}'
+
+
+class Rerank(ESQLBase):
+    """Implementation of the ``RERANK`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(
+        self, parent: ESQLBase, *query: ExpressionType, **named_query: ExpressionType
+    ):
+        if len(query) + len(named_query) > 1:
+            raise ValueError(
+                "this method requires either one positional or one keyword argument only"
+            )
+        super().__init__(parent)
+        self._query = query
+        self._named_query = named_query
+        self._fields: Optional[Tuple[str, ...]] = None
+        self._inference_id: Optional[str] = None
+
+    def on(self, *fields: str) -> "Rerank":
+        """Continuation of the ``RERANK`` command.
+
+        :param fields: One or more fields to use for reranking. These fields should
+                       contain the text that the reranking model will evaluate.
+        """
+        self._fields = fields
+        return self
+
+    def with_(self, inference_id: str) -> "Rerank":
+        """Continuation of the ``RERANK`` command.
+
+        :param inference_id: The ID of the inference endpoint to use for the task. The
+                             inference endpoint must be configured with the ``rerank``
+                             task type.
+        """
+        self._inference_id = inference_id
+        return self
+
+    def _render_internal(self) -> str:
+        if self._fields is None:
+            raise ValueError(
+                "The rerank command requires one or more fields to rerank on"
+            )
+        if self._inference_id is None:
+            raise ValueError("The rerank command requires an inference ID")
+        with_ = {"inference_id": self._inference_id}
+        if self._named_query:
+            column = list(self._named_query.keys())[0]
+            query = list(self._named_query.values())[0]
+            query = f"{self._format_id(column)} = {json.dumps(query)}"
+        else:
+            query = json.dumps(self._query[0])
+        return (
+            f"RERANK {query} "
+            f"ON {', '.join([self._format_id(field) for field in self._fields])} "
+            f"WITH {json.dumps(with_)}"
+        )
+
+
+class Sample(ESQLBase):
+    """Implementation of the ``SAMPLE`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, probability: float):
+        super().__init__(parent)
+        self._probability = probability
+
+    def _render_internal(self) -> str:
+        return f"SAMPLE {json.dumps(self._probability)}"
+
+
+class Sort(ESQLBase):
+    """Implementation of the ``SORT`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, *columns: ExpressionType):
+        super().__init__(parent)
+        self._columns = columns
+
+    def _render_internal(self) -> str:
+        sorts = [
+            " ".join([self._format_id(term) for term in str(col).split(" ")])
+            for col in self._columns
+        ]
+        return f'SORT {", ".join([f"{sort}" for sort in sorts])}'
+
+
+class Stats(ESQLBase):
+    """Implementation of the ``STATS`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    command_name = "STATS"
+
+    def __init__(
+        self,
+        parent: ESQLBase,
+        *expressions: ExpressionType,
+        **named_expressions: ExpressionType,
+    ):
+        if expressions and named_expressions:
+            raise ValueError(
+                "this method supports positional or keyword arguments but not both"
+            )
+        super().__init__(parent)
+        self._expressions = expressions or named_expressions
+        self._grouping_expressions: Optional[Tuple[ExpressionType, ...]] = None
+
+    def by(self, *grouping_expressions: ExpressionType) -> "Stats":
+        """Continuation of the ``STATS`` and ``INLINE STATS`` commands.
+
+        :param grouping_expressions: Expressions that output the values to group by.
+                                     If their names coincide with one of the computed
+                                     columns, that column will be ignored.
+        """
+        self._grouping_expressions = grouping_expressions
+        return self
+
+    def _render_internal(self) -> str:
+        if isinstance(self._expressions, dict):
+            exprs = [
+                f"{self._format_id(key)} = {self._format_expr(value)}"
+                for key, value in self._expressions.items()
+            ]
+        else:
+            exprs = [f"{self._format_expr(expr)}" for expr in self._expressions]
+        indent = " " * (len(self.command_name) + 3)
+        expression_separator = f",\n{indent}"
+        by = (
+            ""
+            if self._grouping_expressions is None
+            else f'\n{indent}BY {", ".join([f"{self._format_expr(expr)}" for expr in self._grouping_expressions])}'
+        )
+        return f'{self.command_name} {expression_separator.join([f"{expr}" for expr in exprs])}{by}'
+
+
+class InlineStats(Stats):
+    """Implementation of the ``INLINE STATS`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    command_name = "INLINE STATS"
+
+
+class Where(ESQLBase):
+    """Implementation of the ``WHERE`` processing command.
+
+    This class inherits from :class:`ESQLBase <elasticsearch.esql.esql.ESQLBase>`,
+    to make it possible to chain all the commands that belong to an ES|QL query
+    in a single expression.
+    """
+
+    def __init__(self, parent: ESQLBase, *expressions: ExpressionType):
+        super().__init__(parent)
+        self._expressions = expressions
+
+    def _render_internal(self) -> str:
+        return f'WHERE {" AND ".join([f"{self._format_expr(expr)}" for expr in self._expressions])}'
+
+
+def and_(*expressions: InstrumentedExpression) -> "InstrumentedExpression":
+    """Combine two or more expressions with the AND operator."""
+    return InstrumentedExpression(" AND ".join([f"({expr})" for expr in expressions]))
+
+
+def or_(*expressions: InstrumentedExpression) -> "InstrumentedExpression":
+    """Combine two or more expressions with the OR operator."""
+    return InstrumentedExpression(" OR ".join([f"({expr})" for expr in expressions]))
+
+
+def not_(expression: InstrumentedExpression) -> "InstrumentedExpression":
+    """Negate an expression."""
+    return InstrumentedExpression(f"NOT ({expression})")
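+
+
+# Illustrative usage of the boolean helpers above (a sketch only; E() and the
+# "employees" fields are the same ones used in the docstring examples of this module):
+#
+#     query = ESQL.from_("employees").where(and_(E("languages") > 1, not_(E("still_hired"))))
+#
+# The condition renders roughly as: (languages > 1) AND (NOT (still_hired))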
diff -pruN 8.17.2-2/elasticsearch/esql/functions.py 9.2.0-1/elasticsearch/esql/functions.py
--- 8.17.2-2/elasticsearch/esql/functions.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/elasticsearch/esql/functions.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1838 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import json
+from typing import Any
+
+from elasticsearch.dsl.document_base import InstrumentedExpression
+from elasticsearch.esql.esql import ESQLBase, ExpressionType
+
+
+def _render(v: Any) -> str:
+    return (
+        json.dumps(v)
+        if not isinstance(v, InstrumentedExpression)
+        else ESQLBase._format_expr(v)
+    )
+
+
+def abs(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the absolute value.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ABS({_render(number)})")
+
+
+def absent(field: ExpressionType) -> InstrumentedExpression:
+    """Returns true if the input expression yields no non-null values within the
+    current aggregation context.
+
+    :param field: Expression that outputs values to be checked for absence.
+    """
+    return InstrumentedExpression(f"ABSENT({_render(field)})")
+
+
+def absent_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """Calculates the absence of a field in the output result over time range."""
+    return InstrumentedExpression(f"ABSENT_OVER_TIME({_render(field)})")
+
+
+def acos(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the arccosine of `n` as an angle, expressed in radians.
+
+    :param number: Number between -1 and 1. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ACOS({_render(number)})")
+
+
+def asin(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the arcsine of the input numeric expression as an angle,
+    expressed in radians.
+
+    :param number: Number between -1 and 1. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ASIN({_render(number)})")
+
+
+def atan(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the arctangent of the input numeric expression as an angle,
+    expressed in radians.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ATAN({_render(number)})")
+
+
+def atan2(
+    y_coordinate: ExpressionType, x_coordinate: ExpressionType
+) -> InstrumentedExpression:
+    """The angle between the positive x-axis and the ray from the origin to the
+    point (x, y) in the Cartesian plane, expressed in radians.
+
+    :param y_coordinate: y coordinate. If `null`, the function returns `null`.
+    :param x_coordinate: x coordinate. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(
+        f"ATAN2({_render(y_coordinate)}, {_render(x_coordinate)})"
+    )
+
+
+def avg(number: ExpressionType) -> InstrumentedExpression:
+    """The average of a numeric field.
+
+    :param number: Expression that outputs values to average.
+    """
+    return InstrumentedExpression(f"AVG({_render(number)})")
+
+
+def avg_over_time(number: ExpressionType) -> InstrumentedExpression:
+    """The average over time of a numeric field.
+
+    :param number: Expression that outputs values to average.
+    """
+    return InstrumentedExpression(f"AVG_OVER_TIME({_render(number)})")
+
+
+def bit_length(string: ExpressionType) -> InstrumentedExpression:
+    """Returns the bit length of a string.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"BIT_LENGTH({_render(string)})")
+
+
+def bucket(
+    field: ExpressionType,
+    buckets: ExpressionType,
+    from_: ExpressionType,
+    to: ExpressionType,
+) -> InstrumentedExpression:
+    """Creates groups of values - buckets - out of a datetime or numeric input.
+    The size of the buckets can either be provided directly, or chosen based on
+    a recommended count and values range.
+
+    :param field: Numeric or date expression from which to derive buckets.
+    :param buckets: Target number of buckets, or desired bucket size if `from`
+                    and `to` parameters are omitted.
+    :param from_: Start of the range. Can be a number, a date or a date expressed
+                  as a string.
+    :param to: End of the range. Can be a number, a date or a date expressed as a string.
+    """
+    return InstrumentedExpression(
+        f"BUCKET({_render(field)}, {_render(buckets)}, {_render(from_)}, {_render(to)})"
+    )
+
+
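+# Illustrative grouping usage (a sketch only, assuming the "employees"/"hire_date"
+# fields used in the esql.py docstring examples):
+#     .stats(hires=functions.count("emp_no"))
+#     .by(functions.bucket(E("hire_date"), 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z"))
+
+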
+def byte_length(string: ExpressionType) -> InstrumentedExpression:
+    """Returns the byte length of a string.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"BYTE_LENGTH({_render(string)})")
+
+
+def case(*conditions: ExpressionType) -> InstrumentedExpression:
+    """Accepts pairs of conditions and values. The function returns the value
+    that belongs to the first condition that evaluates to `true`.  If the
+    number of arguments is odd, the last argument is the default value which is
+    returned when no condition matches. If the number of arguments is even, and
+    no condition matches, the function returns `null`.
+    """
+    return InstrumentedExpression(
+        f'CASE({", ".join([_render(c) for c in conditions])})'
+    )
+
+
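+# Illustrative (a sketch only, reusing the E() helper from the esql docstring
+# examples): functions.case(E("languages") < 2, "monolingual", "polyglot")
+# renders roughly as CASE(languages < 2, "monolingual", "polyglot").
+
+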
+def categorize(field: ExpressionType) -> InstrumentedExpression:
+    """Groups text messages into categories of similarly formatted text values.
+
+    :param field: Expression to categorize
+    """
+    return InstrumentedExpression(f"CATEGORIZE({_render(field)})")
+
+
+def cbrt(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the cube root of a number. The input can be any numeric value,
+    the return value is always a double. Cube roots of infinities are null.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"CBRT({_render(number)})")
+
+
+def ceil(number: ExpressionType) -> InstrumentedExpression:
+    """Round a number up to the nearest integer.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"CEIL({_render(number)})")
+
+
+def cidr_match(ip: ExpressionType, block_x: ExpressionType) -> InstrumentedExpression:
+    """Returns true if the provided IP is contained in one of the provided CIDR blocks.
+
+    :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported).
+    :param block_x: CIDR block to test the IP against.
+    """
+    return InstrumentedExpression(f"CIDR_MATCH({_render(ip)}, {_render(block_x)})")
+
+
+def coalesce(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression:
+    """Returns the first of its arguments that is not null. If all arguments
+    are null, it returns `null`.
+
+    :param first: Expression to evaluate.
+    :param rest: Other expression to evaluate.
+    """
+    return InstrumentedExpression(f"COALESCE({_render(first)}, {_render(rest)})")
+
+
+def concat(*strings: ExpressionType) -> InstrumentedExpression:
+    """Concatenates two or more strings."""
+    return InstrumentedExpression(
+        f'CONCAT({", ".join([f"{_render(s)}" for s in strings])})'
+    )
+
+
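+# Illustrative (a sketch only): functions.concat(E("first_name"), " ", E("last_name"))
+# renders roughly as CONCAT(first_name, " ", last_name).
+
+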
+def cos(angle: ExpressionType) -> InstrumentedExpression:
+    """Returns the cosine of an angle.
+
+    :param angle: An angle, in radians. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"COS({_render(angle)})")
+
+
+def cosh(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the hyperbolic cosine of a number.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"COSH({_render(number)})")
+
+
+def count(field: ExpressionType) -> InstrumentedExpression:
+    """Returns the total number (count) of input values.
+
+    :param field: Expression that outputs values to be counted. If omitted,
+                  equivalent to `COUNT(*)` (the number of rows).
+    """
+    return InstrumentedExpression(f"COUNT({_render(field)})")
+
+
+def count_distinct(
+    field: ExpressionType, precision: ExpressionType
+) -> InstrumentedExpression:
+    """Returns the approximate number of distinct values.
+
+    :param field: Column or literal for which to count the number of distinct values.
+    :param precision: Precision threshold. The maximum supported value is 40000. Thresholds
+                      above this number will have the same effect as a threshold of 40000.
+                      The default value is 3000.
+    """
+    return InstrumentedExpression(
+        f"COUNT_DISTINCT({_render(field)}, {_render(precision)})"
+    )
+
+
+def count_distinct_over_time(
+    field: ExpressionType, precision: ExpressionType
+) -> InstrumentedExpression:
+    """The count of distinct values over time for a field.
+
+    :param field:
+    :param precision: Precision threshold. The maximum supported value is 40000. Thresholds
+                      above this number will have the same effect as a threshold of 40000. The
+                      default value is 3000.
+    """
+    return InstrumentedExpression(
+        f"COUNT_DISTINCT_OVER_TIME({_render(field)}, {_render(precision)})"
+    )
+
+
+def count_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """The count over time value of a field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"COUNT_OVER_TIME({_render(field)})")
+
+
+def date_diff(
+    unit: ExpressionType, start_timestamp: ExpressionType, end_timestamp: ExpressionType
+) -> InstrumentedExpression:
+    """Subtracts the `startTimestamp` from the `endTimestamp` and returns the
+    difference in multiples of `unit`. If `startTimestamp` is later than the
+    `endTimestamp`, negative values are returned.
+
+    :param unit: Time difference unit
+    :param start_timestamp: A string representing a start timestamp
+    :param end_timestamp: A string representing an end timestamp
+    """
+    return InstrumentedExpression(
+        f"DATE_DIFF({_render(unit)}, {_render(start_timestamp)}, {_render(end_timestamp)})"
+    )
+
+
+def date_extract(
+    date_part: ExpressionType, date: ExpressionType
+) -> InstrumentedExpression:
+    """Extracts parts of a date, like year, month, day, hour.
+
+    :param date_part: Part of the date to extract.  Can be:
+        `aligned_day_of_week_in_month`, `aligned_day_of_week_in_year`,
+        `aligned_week_of_month`, `aligned_week_of_year`, `ampm_of_day`,
+        `clock_hour_of_ampm`, `clock_hour_of_day`, `day_of_month`, `day_of_week`,
+        `day_of_year`, `epoch_day`, `era`, `hour_of_ampm`, `hour_of_day`,
+        `instant_seconds`, `micro_of_day`, `micro_of_second`, `milli_of_day`,
+        `milli_of_second`, `minute_of_day`, `minute_of_hour`, `month_of_year`,
+        `nano_of_day`, `nano_of_second`, `offset_seconds`, `proleptic_month`,
+        `second_of_day`, `second_of_minute`, `year`, or `year_of_era`. If `null`,
+        the function returns `null`.
+    :param date: Date expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(
+        f"DATE_EXTRACT({_render(date_part)}, {_render(date)})"
+    )
+
+
+def date_format(
+    date: ExpressionType,
+    date_format: ExpressionType = None,
+) -> InstrumentedExpression:
+    """Returns a string representation of a date, in the provided format.
+
+    :param date_format: Date format (optional).  If no format is specified, the
+                        `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the
+                        function returns `null`.
+    :param date: Date expression. If `null`, the function returns `null`.
+    """
+    if date_format is not None:
+        return InstrumentedExpression(
+            f"DATE_FORMAT({_render(date_format)}, {_render(date)})"
+        )
+    else:
+        return InstrumentedExpression(f"DATE_FORMAT({_render(date)})")
+
+
+def date_parse(
+    date_pattern: ExpressionType, date_string: ExpressionType
+) -> InstrumentedExpression:
+    """Returns a date by parsing the second argument using the format specified
+    in the first argument.
+
+    :param date_pattern: The date format. If `null`, the function returns `null`.
+    :param date_string: Date expression as a string. If `null` or an empty
+                        string, the function returns `null`.
+    """
+    return InstrumentedExpression(
+        f"DATE_PARSE({_render(date_pattern)}, {_render(date_string)})"
+    )
+
+
+def date_trunc(
+    interval: ExpressionType, date: ExpressionType
+) -> InstrumentedExpression:
+    """Rounds down a date to the closest interval since epoch, which starts at `0001-01-01T00:00:00Z`.
+
+    :param interval: Interval; expressed using the timespan literal syntax.
+    :param date: Date expression
+    """
+    return InstrumentedExpression(f"DATE_TRUNC({_render(interval)}, {_render(date)})")
+
+
+def e() -> InstrumentedExpression:
+    """Returns Euler’s number)."""
+    return InstrumentedExpression("E()")
+
+
+def ends_with(str: ExpressionType, suffix: ExpressionType) -> InstrumentedExpression:
+    """Returns a boolean that indicates whether a keyword string ends with
+    another string.
+
+    :param str: String expression. If `null`, the function returns `null`.
+    :param suffix: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ENDS_WITH({_render(str)}, {_render(suffix)})")
+
+
+def exp(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the value of e raised to the power of the given number.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"EXP({_render(number)})")
+
+
+def first(value: ExpressionType, sort: ExpressionType) -> InstrumentedExpression:
+    """Calculates the earliest value of a field."""
+    return InstrumentedExpression(f"FIRST({_render(value)}, {_render(sort)})")
+
+
+def first_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """The earliest value of a field, where recency determined by the
+    `@timestamp` field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"FIRST_OVER_TIME({_render(field)})")
+
+
+def floor(number: ExpressionType) -> InstrumentedExpression:
+    """Round a number down to the nearest integer.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"FLOOR({_render(number)})")
+
+
+def from_base64(string: ExpressionType) -> InstrumentedExpression:
+    """Decode a base64 string.
+
+    :param string: A base64 string.
+    """
+    return InstrumentedExpression(f"FROM_BASE64({_render(string)})")
+
+
+def greatest(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression:
+    """Returns the maximum value from multiple columns. This is similar to
+    `MV_MAX` except it is intended to run on multiple columns at once.
+
+    :param first: First of the columns to evaluate.
+    :param rest: The rest of the columns to evaluate.
+    """
+    return InstrumentedExpression(f"GREATEST({_render(first)}, {_render(rest)})")
+
+
+def hash(algorithm: ExpressionType, input: ExpressionType) -> InstrumentedExpression:
+    """Computes the hash of the input using various algorithms such as MD5,
+    SHA, SHA-224, SHA-256, SHA-384, SHA-512.
+
+    :param algorithm: Hash algorithm to use.
+    :param input: Input to hash.
+    """
+    return InstrumentedExpression(f"HASH({_render(algorithm)}, {_render(input)})")
+
+
+def hypot(number1: ExpressionType, number2: ExpressionType) -> InstrumentedExpression:
+    """Returns the hypotenuse of two numbers. The input can be any numeric
+    value; the return value is always a double. Hypotenuses of infinities are null.
+
+    :param number1: Numeric expression. If `null`, the function returns `null`.
+    :param number2: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"HYPOT({number1}, {number2})")
+
+
+def ip_prefix(
+    ip: ExpressionType,
+    prefix_length_v4: ExpressionType,
+    prefix_length_v6: ExpressionType,
+) -> InstrumentedExpression:
+    """Truncates an IP to a given prefix length.
+
+    :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported).
+    :param prefix_length_v4: Prefix length for IPv4 addresses.
+    :param prefix_length_v6: Prefix length for IPv6 addresses.
+    """
+    return InstrumentedExpression(
+        f"IP_PREFIX({_render(ip)}, {prefix_length_v4}, {prefix_length_v6})"
+    )
+
+
+def knn(
+    field: ExpressionType, query: ExpressionType, options: ExpressionType = None
+) -> InstrumentedExpression:
+    """Finds the k nearest vectors to a query vector, as measured by a
+    similarity metric. The knn function finds nearest vectors through approximate
+    search on indexed dense_vectors.
+
+    :param field: Field that the query will target.
+    :param query: Vector value to find top nearest neighbours for.
+    :param options: (Optional) kNN additional options as function named parameters.
+    """
+    if options is not None:
+        return InstrumentedExpression(
+            f"KNN({_render(field)}, {_render(query)}, {_render(options)})"
+        )
+    else:
+        return InstrumentedExpression(f"KNN({_render(field)}, {_render(query)})")
+
+
+def kql(query: ExpressionType) -> InstrumentedExpression:
+    """Performs a KQL query. Returns true if the provided KQL query string
+    matches the row.
+
+    :param query: Query string in KQL query string format.
+    """
+    return InstrumentedExpression(f"KQL({_render(query)})")
+
+
+def last(value: ExpressionType, sort: ExpressionType) -> InstrumentedExpression:
+    """Calculates the latest value of a field."""
+    return InstrumentedExpression(f"LAST({_render(value)}, {_render(sort)})")
+
+
+def last_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """The latest value of a field, where recency determined by the
+    `@timestamp` field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"LAST_OVER_TIME({_render(field)})")
+
+
+def least(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression:
+    """Returns the minimum value from multiple columns. This is similar to
+    `MV_MIN` except it is intended to run on multiple columns at once.
+
+    :param first: First of the columns to evaluate.
+    :param rest: The rest of the columns to evaluate.
+    """
+    return InstrumentedExpression(f"LEAST({_render(first)}, {_render(rest)})")
+
+
+def left(string: ExpressionType, length: ExpressionType) -> InstrumentedExpression:
+    """Returns the substring that extracts *length* chars from *string*
+    starting from the left.
+
+    :param string: The string from which to return a substring.
+    :param length: The number of characters to return.
+    """
+    return InstrumentedExpression(f"LEFT({_render(string)}, {_render(length)})")
+
+
+def length(string: ExpressionType) -> InstrumentedExpression:
+    """Returns the character length of a string.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"LENGTH({_render(string)})")
+
+
+def locate(
+    string: ExpressionType, substring: ExpressionType, start: ExpressionType
+) -> InstrumentedExpression:
+    """Returns an integer that indicates the position of a keyword substring
+    within another string. Returns `0` if the substring cannot be found. Note
+    that string positions start from `1`.
+
+    :param string: An input string
+    :param substring: A substring to locate in the input string
+    :param start: The start index
+    """
+    return InstrumentedExpression(
+        f"LOCATE({_render(string)}, {_render(substring)}, {_render(start)})"
+    )
+
+
+def log(base: ExpressionType, number: ExpressionType) -> InstrumentedExpression:
+    """Returns the logarithm of a value to a base. The input can be any numeric
+    value; the return value is always a double.  Logs of zero, negative
+    numbers, and a base of one return `null` as well as a warning.
+
+    :param base: Base of logarithm. If `null`, the function returns `null`. If
+                 not provided, this function returns the natural logarithm (base e) of a value.
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"LOG({_render(base)}, {_render(number)})")
+
+
+def log10(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the logarithm of a value to base 10. The input can be any
+    numeric value; the return value is always a double.  Logs of 0 and negative
+    numbers return `null` as well as a warning.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"LOG10({_render(number)})")
+
+
+def ltrim(string: ExpressionType) -> InstrumentedExpression:
+    """Removes leading whitespaces from a string.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"LTRIM({_render(string)})")
+
+
+def match(
+    field: ExpressionType, query: ExpressionType, options: ExpressionType = None
+) -> InstrumentedExpression:
+    """Use `MATCH` to perform a match query on the specified field. Using
+    `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
+
+    :param field: Field that the query will target.
+    :param query: Value to find in the provided field.
+    :param options: (Optional) Match additional options as function named parameters.
+    """
+    if options is not None:
+        return InstrumentedExpression(
+            f"MATCH({_render(field)}, {_render(query)}, {_render(options)})"
+        )
+    else:
+        return InstrumentedExpression(f"MATCH({_render(field)}, {_render(query)})")
+
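+# Illustrative usage sketch (hypothetical field and query): match("message",
+# "connection error") builds the two-argument MATCH(<field>, <query>) form, and
+# passing a third `options` expression appends it as MATCH(<field>, <query>,
+# <options>); rendering of each argument is handled by `_render`.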
+
+def match_phrase(
+    field: ExpressionType, query: ExpressionType, options: ExpressionType = None
+) -> InstrumentedExpression:
+    """Use `MATCH_PHRASE` to perform a `match_phrase` on the specified field.
+    Using `MATCH_PHRASE` is equivalent to using the `match_phrase` query in the
+    Elasticsearch Query DSL.
+
+    :param field: Field that the query will target.
+    :param query: Value to find in the provided field.
+    :param options: (Optional) MatchPhrase additional options as function named parameters.
+    """
+    if options is not None:
+        return InstrumentedExpression(
+            f"MATCH_PHRASE({_render(field)}, {_render(query)}, {_render(options)})"
+        )
+    else:
+        return InstrumentedExpression(
+            f"MATCH_PHRASE({_render(field)}, {_render(query)})"
+        )
+
+
+def max(field: ExpressionType) -> InstrumentedExpression:
+    """The maximum value of a field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"MAX({_render(field)})")
+
+
+def max_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """The maximum over time value of a field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"MAX_OVER_TIME({_render(field)})")
+
+
+def md5(input: ExpressionType) -> InstrumentedExpression:
+    """Computes the MD5 hash of the input.
+
+    :param input: Input to hash.
+    """
+    return InstrumentedExpression(f"MD5({_render(input)})")
+
+
+def median(number: ExpressionType) -> InstrumentedExpression:
+    """The value that is greater than half of all values and less than half of
+    all values, also known as the 50% `PERCENTILE`.
+
+    :param number: Expression that outputs values to calculate the median of.
+    """
+    return InstrumentedExpression(f"MEDIAN({_render(number)})")
+
+
+def median_absolute_deviation(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the median absolute deviation, a measure of variability. It is a
+    robust statistic, meaning that it is useful for describing data that may
+    have outliers, or may not be normally distributed. For such data it can be
+    more descriptive than standard deviation.  It is calculated as the median
+    of each data point’s deviation from the median of the entire sample. That
+    is, for a random variable `X`, the median absolute deviation is
+    `median(|median(X) - X|)`.
+
+    :param number:
+    """
+    return InstrumentedExpression(f"MEDIAN_ABSOLUTE_DEVIATION({_render(number)})")
+
+
+def min(field: ExpressionType) -> InstrumentedExpression:
+    """The minimum value of a field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"MIN({_render(field)})")
+
+
+def min_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """The minimum over time value of a field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"MIN_OVER_TIME({_render(field)})")
+
+
+def multi_match(
+    query: ExpressionType, *fields: ExpressionType, options: ExpressionType = None
+) -> InstrumentedExpression:
+    """Use `MULTI_MATCH` to perform a multi-match query on the specified field.
+    The multi_match query builds on the match query to allow multi-field queries.
+
+    :param query: Value to find in the provided fields.
+    :param fields: Fields to use for matching
+    :param options: (Optional) Additional options for MultiMatch, passed as function
+                    named parameters.
+    """
+    if options is not None:
+        return InstrumentedExpression(
+            f'MULTI_MATCH({_render(query)}, {", ".join([_render(c) for c in fields])}, {_render(options)})'
+        )
+    else:
+        return InstrumentedExpression(
+            f'MULTI_MATCH({_render(query)}, {", ".join([_render(c) for c in fields])})'
+        )
+
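+# Illustrative usage sketch (hypothetical field names): multi_match("error",
+# "message", "title") joins the variadic fields with ", ", yielding
+# MULTI_MATCH(<query>, <field1>, <field2>); an optional `options` expression, when
+# given, is appended as the last argument.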
+
+def mv_append(field1: ExpressionType, field2: ExpressionType) -> InstrumentedExpression:
+    """Concatenates values of two multi-value fields.
+
+    :param field1:
+    :param field2:
+    """
+    return InstrumentedExpression(f"MV_APPEND({field1}, {field2})")
+
+
+def mv_avg(number: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued field into a single valued field containing the
+    average of all of the values.
+
+    :param number: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_AVG({_render(number)})")
+
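+# Illustrative sketch: because every helper returns an InstrumentedExpression,
+# calls nest naturally — e.g. round(mv_avg(prices), 2) composes to
+# ROUND(MV_AVG(<prices>), 2) for a hypothetical multivalued numeric column
+# `prices`, assuming `_render` passes nested expressions through verbatim.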
+
+def mv_concat(string: ExpressionType, delim: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued string expression into a single valued column
+    containing the concatenation of all values separated by a delimiter.
+
+    :param string: Multivalue expression.
+    :param delim: Delimiter.
+    """
+    return InstrumentedExpression(f"MV_CONCAT({_render(string)}, {_render(delim)})")
+
+
+def mv_contains(
+    superset: ExpressionType, subset: ExpressionType
+) -> InstrumentedExpression:
+    """Checks if all values yielded by the second multivalue expression are present in the
+    values yielded by the first multivalue expression. Returns a boolean. Null values are
+    treated as an empty set.
+    """
+    return InstrumentedExpression(
+        f"MV_CONTAINS({_render(superset)}, {_render(subset)})"
+    )
+
+
+def mv_count(field: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued expression into a single valued column containing
+    a count of the number of values.
+
+    :param field: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_COUNT({_render(field)})")
+
+
+def mv_dedupe(field: ExpressionType) -> InstrumentedExpression:
+    """Remove duplicate values from a multivalued field.
+
+    :param field: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_DEDUPE({_render(field)})")
+
+
+def mv_first(field: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued expression into a single valued column containing
+    the first value. This is most useful when reading from a function that
+    emits multivalued columns in a known order like `SPLIT`.
+
+    :param field: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_FIRST({_render(field)})")
+
+
+def mv_last(field: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalue expression into a single valued column containing
+    the last value. This is most useful when reading from a function that emits
+    multivalued columns in a known order like `SPLIT`.
+
+    :param field: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_LAST({_render(field)})")
+
+
+def mv_max(field: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued expression into a single valued column containing
+    the maximum value.
+
+    :param field: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_MAX({_render(field)})")
+
+
+def mv_median(number: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued field into a single valued field containing the
+    median value.
+
+    :param number: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_MEDIAN({_render(number)})")
+
+
+def mv_median_absolute_deviation(number: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued field into a single valued field containing the
+    median absolute deviation.  It is calculated as the median of each data
+    point’s deviation from the median of the entire sample. That is, for a
+    random variable `X`, the median absolute deviation is `median(|median(X) - X|)`.
+
+    :param number: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_MEDIAN_ABSOLUTE_DEVIATION({_render(number)})")
+
+
+def mv_min(field: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued expression into a single valued column containing
+    the minimum value.
+
+    :param field: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_MIN({_render(field)})")
+
+
+def mv_percentile(
+    number: ExpressionType, percentile: ExpressionType
+) -> InstrumentedExpression:
+    """Converts a multivalued field into a single valued field containing the
+    value at which a certain percentage of observed values occur.
+
+    :param number: Multivalue expression.
+    :param percentile: The percentile to calculate. Must be a number between 0
+                       and 100. Numbers out of range will return a null instead.
+    """
+    return InstrumentedExpression(
+        f"MV_PERCENTILE({_render(number)}, {_render(percentile)})"
+    )
+
+
+def mv_pseries_weighted_sum(
+    number: ExpressionType, p: ExpressionType
+) -> InstrumentedExpression:
+    """Converts a multivalued expression into a single-valued column by
+    multiplying every element on the input list by its corresponding term in
+    P-Series and computing the sum.
+
+    :param number: Multivalue expression.
+    :param p: It is a constant number that represents the *p* parameter in the
+              P-Series. It impacts every element’s contribution to the weighted sum.
+    """
+    return InstrumentedExpression(
+        f"MV_PSERIES_WEIGHTED_SUM({_render(number)}, {_render(p)})"
+    )
+
+
+def mv_slice(
+    field: ExpressionType, start: ExpressionType, end: ExpressionType = None
+) -> InstrumentedExpression:
+    """Returns a subset of the multivalued field using the start and end index
+    values. This is most useful when reading from a function that emits
+    multivalued columns in a known order like `SPLIT` or `MV_SORT`.
+
+    :param field: Multivalue expression. If `null`, the function returns `null`.
+    :param start: Start position. If `null`, the function returns `null`. The
+                  start argument can be negative. An index of -1 is used to specify
+                  the last value in the list.
+    :param end: End position (included). Optional; if omitted, the position at
+                `start` is returned. The end argument can be negative. An index of -1
+                is used to specify the last value in the list.
+    """
+    if end is not None:
+        return InstrumentedExpression(
+            f"MV_SLICE({_render(field)}, {_render(start)}, {_render(end)})"
+        )
+    else:
+        return InstrumentedExpression(f"MV_SLICE({_render(field)}, {_render(start)})")
+
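+# Illustrative usage sketch (hypothetical multivalued column `tags`):
+# mv_slice(tags, 0, 2) keeps the first three values (MV_SLICE(<tags>, 0, 2), the
+# end index being included), while mv_slice(tags, -1) keeps only the last value
+# via the single-position form.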
+
+def mv_sort(field: ExpressionType, order: ExpressionType) -> InstrumentedExpression:
+    """Sorts a multivalued field in lexicographical order.
+
+    :param field: Multivalue expression. If `null`, the function returns `null`.
+    :param order: Sort order. The valid options are ASC and DESC, the default is ASC.
+    """
+    return InstrumentedExpression(f"MV_SORT({_render(field)}, {_render(order)})")
+
+
+def mv_sum(number: ExpressionType) -> InstrumentedExpression:
+    """Converts a multivalued field into a single valued field containing the
+    sum of all of the values.
+
+    :param number: Multivalue expression.
+    """
+    return InstrumentedExpression(f"MV_SUM({_render(number)})")
+
+
+def mv_zip(
+    string1: ExpressionType, string2: ExpressionType, delim: ExpressionType = None
+) -> InstrumentedExpression:
+    """Combines the values from two multivalued fields with a delimiter that
+    joins them together.
+
+    :param string1: Multivalue expression.
+    :param string2: Multivalue expression.
+    :param delim: Delimiter. Optional; if omitted, `,` is used as a default delimiter.
+    """
+    if delim is not None:
+        return InstrumentedExpression(f"MV_ZIP({string1}, {string2}, {_render(delim)})")
+    else:
+        return InstrumentedExpression(f"MV_ZIP({string1}, {string2})")
+
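+# Illustrative usage sketch (hypothetical multivalued columns): mv_zip(names,
+# values, " - ") joins the two columns pairwise with the given delimiter; omitting
+# it emits the two-argument MV_ZIP form, which defaults to `,` on the server side.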
+
+def now() -> InstrumentedExpression:
+    """Returns current date and time."""
+    return InstrumentedExpression("NOW()")
+
+
+def percentile(
+    number: ExpressionType, percentile: ExpressionType
+) -> InstrumentedExpression:
+    """Returns the value at which a certain percentage of observed values
+    occur. For example, the 95th percentile is the value which is greater than
+    95% of the observed values and the 50th percentile is the `MEDIAN`.
+
+    :param number:
+    :param percentile:
+    """
+    return InstrumentedExpression(
+        f"PERCENTILE({_render(number)}, {_render(percentile)})"
+    )
+
+
+def pi() -> InstrumentedExpression:
+    """Returns Pi, the ratio of a circle’s circumference to its diameter."""
+    return InstrumentedExpression("PI()")
+
+
+def pow(base: ExpressionType, exponent: ExpressionType) -> InstrumentedExpression:
+    """Returns the value of `base` raised to the power of `exponent`.
+
+    :param base: Numeric expression for the base. If `null`, the function returns `null`.
+    :param exponent: Numeric expression for the exponent. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"POW({_render(base)}, {_render(exponent)})")
+
+
+def present(field: ExpressionType) -> InstrumentedExpression:
+    """Returns true if the input expression yields any non-null values within the current
+    aggregation context. Otherwise it returns false.
+    """
+    return InstrumentedExpression(f"PRESENT({_render(field)})")
+
+
+def present_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """Calculates the presence of a field in the output result over time range."""
+    return InstrumentedExpression(f"PRESENT_OVER_TIME({_render(field)})")
+
+
+def qstr(
+    query: ExpressionType, options: ExpressionType = None
+) -> InstrumentedExpression:
+    """Performs a query string query. Returns true if the provided query string
+    matches the row.
+
+    :param query: Query string in Lucene query string format.
+    :param options: (Optional) Additional options for Query String as function named
+                    parameters.
+    """
+    if options is not None:
+        return InstrumentedExpression(f"QSTR({_render(query)}, {_render(options)})")
+    else:
+        return InstrumentedExpression(f"QSTR({_render(query)})")
+
+
+def rate(field: ExpressionType) -> InstrumentedExpression:
+    """The rate of a counter field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"RATE({_render(field)})")
+
+
+def repeat(string: ExpressionType, number: ExpressionType) -> InstrumentedExpression:
+    """Returns a string constructed by concatenating `string` with itself the
+    specified `number` of times.
+
+    :param string: String expression.
+    :param number: Number of times to repeat.
+    """
+    return InstrumentedExpression(f"REPEAT({_render(string)}, {_render(number)})")
+
+
+def replace(
+    string: ExpressionType, regex: ExpressionType, new_string: ExpressionType
+) -> InstrumentedExpression:
+    """The function substitutes in the string `str` any match of the regular
+    expression `regex` with the replacement string `newStr`.
+
+    :param string: String expression.
+    :param regex: Regular expression.
+    :param new_string: Replacement string.
+    """
+    return InstrumentedExpression(
+        f"REPLACE({_render(string)}, {_render(regex)}, {_render(new_string)})"
+    )
+
+
+def reverse(str: ExpressionType) -> InstrumentedExpression:
+    """Returns a new string representing the input string in reverse order.
+
+    :param str: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"REVERSE({_render(str)})")
+
+
+def right(string: ExpressionType, length: ExpressionType) -> InstrumentedExpression:
+    """Return the substring that extracts *length* chars from *str* starting
+    from the right.
+
+    :param string: The string from which to returns a substring.
+    :param length: The number of characters to return.
+    """
+    return InstrumentedExpression(f"RIGHT({_render(string)}, {_render(length)})")
+
+
+def round(
+    number: ExpressionType, decimals: ExpressionType = None
+) -> InstrumentedExpression:
+    """Rounds a number to the specified number of decimal places. Defaults to
+    0, which returns the nearest integer. If the precision is a negative
+    number, rounds to the number of digits left of the decimal point.
+
+    :param number: The numeric value to round. If `null`, the function returns `null`.
+    :param decimals: The number of decimal places to round to. Defaults to 0. If
+                     `null`, the function returns `null`.
+    """
+    if decimals is not None:
+        return InstrumentedExpression(f"ROUND({_render(number)}, {_render(decimals)})")
+    else:
+        return InstrumentedExpression(f"ROUND({_render(number)})")
+
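+# Illustrative usage sketch (hypothetical numeric column `price`): round(price, 2)
+# renders ROUND(<price>, 2), while round(price) omits the decimals argument and
+# rounds to the nearest integer.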
+
+def round_to(field: ExpressionType, points: ExpressionType) -> InstrumentedExpression:
+    """Rounds down to one of a list of fixed points.
+
+    :param field: The numeric value to round. If `null`, the function returns `null`.
+    :param points: The rounding points. Must be constants.
+    """
+    return InstrumentedExpression(f"ROUND_TO({_render(field)}, {_render(points)})")
+
+
+def rtrim(string: ExpressionType) -> InstrumentedExpression:
+    """Removes trailing whitespaces from a string.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"RTRIM({_render(string)})")
+
+
+def sample(field: ExpressionType, limit: ExpressionType) -> InstrumentedExpression:
+    """Collects sample values for a field.
+
+    :param field: The field to collect sample values for.
+    :param limit: The maximum number of values to collect.
+    """
+    return InstrumentedExpression(f"SAMPLE({_render(field)}, {_render(limit)})")
+
+
+def scalb(d: ExpressionType, scale_factor: ExpressionType) -> InstrumentedExpression:
+    """Returns the result of `d * 2 ^ scaleFactor`, Similar to Java's `scalb`
+    function. Result is rounded as if performed by a single correctly rounded
+    floating-point multiply to a member of the double value set.
+
+    :param d: Numeric expression for the multiplier. If `null`, the function
+              returns `null`.
+    :param scale_factor: Numeric expression for the scale factor. If `null`, the
+                         function returns `null`.
+    """
+    return InstrumentedExpression(f"SCALB({_render(d)}, {_render(scale_factor)})")
+
+
+def sha1(input: ExpressionType) -> InstrumentedExpression:
+    """Computes the SHA1 hash of the input.
+
+    :param input: Input to hash.
+    """
+    return InstrumentedExpression(f"SHA1({_render(input)})")
+
+
+def sha256(input: ExpressionType) -> InstrumentedExpression:
+    """Computes the SHA256 hash of the input.
+
+    :param input: Input to hash.
+    """
+    return InstrumentedExpression(f"SHA256({_render(input)})")
+
+
+def signum(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the sign of the given number. It returns `-1` for negative
+    numbers, `0` for `0` and `1` for positive numbers.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"SIGNUM({_render(number)})")
+
+
+def sin(angle: ExpressionType) -> InstrumentedExpression:
+    """Returns the sine of an angle.
+
+    :param angle: An angle, in radians. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"SIN({_render(angle)})")
+
+
+def sinh(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the hyperbolic sine of a number.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"SINH({_render(number)})")
+
+
+def space(number: ExpressionType) -> InstrumentedExpression:
+    """Returns a string made of `number` spaces.
+
+    :param number: Number of spaces in result.
+    """
+    return InstrumentedExpression(f"SPACE({_render(number)})")
+
+
+def split(string: ExpressionType, delim: ExpressionType) -> InstrumentedExpression:
+    """Split a single valued string into multiple strings.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    :param delim: Delimiter. Only single byte delimiters are currently supported.
+    """
+    return InstrumentedExpression(f"SPLIT({_render(string)}, {_render(delim)})")
+
+
+def sqrt(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the square root of a number. The input can be any numeric value,
+    the return value is always a double. Square roots of negative numbers and
+    infinities are null.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"SQRT({_render(number)})")
+
+
+def starts_with(str: ExpressionType, prefix: ExpressionType) -> InstrumentedExpression:
+    """Returns a boolean that indicates whether a keyword string starts with
+    another string.
+
+    :param str: String expression. If `null`, the function returns `null`.
+    :param prefix: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"STARTS_WITH({_render(str)}, {_render(prefix)})")
+
+
+def std_dev(number: ExpressionType) -> InstrumentedExpression:
+    """The population standard deviation of a numeric field.
+
+    :param number:
+    """
+    return InstrumentedExpression(f"STD_DEV({_render(number)})")
+
+
+def st_centroid_agg(field: ExpressionType) -> InstrumentedExpression:
+    """Calculate the spatial centroid over a field with spatial point geometry type.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"ST_CENTROID_AGG({_render(field)})")
+
+
+def st_contains(
+    geom_a: ExpressionType, geom_b: ExpressionType
+) -> InstrumentedExpression:
+    """Returns whether the first geometry contains the second geometry. This is
+    the inverse of the ST_WITHIN function.
+
+    :param geom_a: Expression of type `geo_point`, `cartesian_point`,
+                   `geo_shape` or `cartesian_shape`. If `null`, the function returns
+                   `null`.
+    :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape`
+                   or `cartesian_shape`. If `null`, the function returns `null`. The
+                   second parameter must also have the same coordinate system as the
+                   first. This means it is not possible to combine `geo_*` and
+                   `cartesian_*` parameters.
+    """
+    return InstrumentedExpression(f"ST_CONTAINS({_render(geom_a)}, {_render(geom_b)})")
+
+
+def st_disjoint(
+    geom_a: ExpressionType, geom_b: ExpressionType
+) -> InstrumentedExpression:
+    """Returns whether the two geometries or geometry columns are disjoint.
+    This is the inverse of the ST_INTERSECTS function. In mathematical terms:
+    ST_Disjoint(A, B) ⇔ A ⋂ B = ∅
+
+    :param geom_a: Expression of type `geo_point`, `cartesian_point`,
+                   `geo_shape` or `cartesian_shape`. If `null`, the function returns
+                   `null`.
+    :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape`
+                   or `cartesian_shape`. If `null`, the function returns `null`. The
+                   second parameter must also have the same coordinate system as the
+                   first. This means it is not possible to combine `geo_*` and
+                   `cartesian_*` parameters.
+    """
+    return InstrumentedExpression(f"ST_DISJOINT({_render(geom_a)}, {_render(geom_b)})")
+
+
+def st_distance(
+    geom_a: ExpressionType, geom_b: ExpressionType
+) -> InstrumentedExpression:
+    """Computes the distance between two points. For cartesian geometries, this
+    is the pythagorean distance in the same units as the original coordinates.
+    For geographic geometries, this is the circular distance along the great
+    circle in meters.
+
+    :param geom_a: Expression of type `geo_point` or `cartesian_point`. If
+                   `null`, the function returns `null`.
+    :param geom_b: Expression of type `geo_point` or `cartesian_point`. If
+                   `null`, the function returns `null`. The second parameter must
+                   also have the same coordinate system as the first. This means it
+                   is not possible to combine `geo_point` and `cartesian_point` parameters.
+    """
+    return InstrumentedExpression(f"ST_DISTANCE({_render(geom_a)}, {_render(geom_b)})")
+
+
+def st_envelope(geometry: ExpressionType) -> InstrumentedExpression:
+    """Determines the minimum bounding box of the supplied geometry.
+
+    :param geometry: Expression of type `geo_point`, `geo_shape`,
+                     `cartesian_point` or `cartesian_shape`. If `null`, the function
+                     returns `null`.
+    """
+    return InstrumentedExpression(f"ST_ENVELOPE({_render(geometry)})")
+
+
+def st_extent_agg(field: ExpressionType) -> InstrumentedExpression:
+    """Calculate the spatial extent over a field with geometry type. Returns a
+    bounding box for all values of the field.
+
+    :param field:
+    """
+    return InstrumentedExpression(f"ST_EXTENT_AGG({_render(field)})")
+
+
+def st_geohash(
+    geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None
+) -> InstrumentedExpression:
+    """Calculates the `geohash` of the supplied geo_point at the specified
+    precision. The result is long encoded. Use ST_GEOHASH_TO_STRING to convert
+    the result to a string.  These functions are related to the `geo_grid`
+    query and the `geohash_grid` aggregation.
+
+    :param geometry: Expression of type `geo_point`. If `null`, the function
+                     returns `null`.
+    :param precision: Expression of type `integer`. If `null`, the function
+                      returns `null`. Valid values are between 1 and 12.
+    :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of
+                   type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any
+                   other type.
+    """
+    if bounds is not None:
+        return InstrumentedExpression(
+            f"ST_GEOHASH({_render(geometry)}, {_render(precision)}, {_render(bounds)})"
+        )
+    else:
+        return InstrumentedExpression(
+            f"ST_GEOHASH({_render(geometry)}, {_render(precision)})"
+        )
+
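+# Illustrative usage sketch (hypothetical `geo_point` column `location`):
+# st_geohash(location, 5) computes the long-encoded geohash at precision 5; a
+# bounding `geo_shape` of type `BBOX` (for example one built with st_envelope(...))
+# can be passed as the optional third argument to filter the grid tiles.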
+
+def st_geohash_to_long(grid_id: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value representing a geohash grid-ID in string format
+    into a long.
+
+    :param grid_id: Input geohash grid-id. The input can be a single- or
+                    multi-valued column or an expression.
+    """
+    return InstrumentedExpression(f"ST_GEOHASH_TO_LONG({_render(grid_id)})")
+
+
+def st_geohash_to_string(grid_id: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value representing a geohash grid-ID in long format
+    into a string.
+
+    :param grid_id: Input geohash grid-id. The input can be a single- or
+                    multi-valued column or an expression.
+    """
+    return InstrumentedExpression(f"ST_GEOHASH_TO_STRING({_render(grid_id)})")
+
+
+def st_geohex(
+    geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None
+) -> InstrumentedExpression:
+    """Calculates the `geohex`, the H3 cell-id, of the supplied geo_point at
+    the specified precision. The result is long encoded. Use
+    ST_GEOHEX_TO_STRING to convert the result to a string.  These functions are
+    related to the `geo_grid` query and the `geohex_grid` aggregation.
+
+    :param geometry: Expression of type `geo_point`. If `null`, the function
+                     returns `null`.
+    :param precision: Expression of type `integer`. If `null`, the function
+                      returns `null`. Valid values are between 0 and 15.
+    :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of
+                   type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape`
+                   is of any other type.
+    """
+    if bounds is not None:
+        return InstrumentedExpression(
+            f"ST_GEOHEX({_render(geometry)}, {_render(precision)}, {_render(bounds)})"
+        )
+    else:
+        return InstrumentedExpression(
+            f"ST_GEOHEX({_render(geometry)}, {_render(precision)})"
+        )
+
+
+def st_geohex_to_long(grid_id: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value representing a geohex grid-ID in string format
+    into a long.
+
+    :param grid_id: Input geohex grid-id. The input can be a single- or
+                    multi-valued column or an expression.
+    """
+    return InstrumentedExpression(f"ST_GEOHEX_TO_LONG({_render(grid_id)})")
+
+
+def st_geohex_to_string(grid_id: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value representing a Geohex grid-ID in long format
+    into a string.
+
+    :param grid_id: Input Geohex grid-id. The input can be a single- or
+                    multi-valued column or an expression.
+    """
+    return InstrumentedExpression(f"ST_GEOHEX_TO_STRING({_render(grid_id)})")
+
+
+def st_geotile(
+    geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None
+) -> InstrumentedExpression:
+    """Calculates the `geotile` of the supplied geo_point at the specified
+    precision. The result is long encoded. Use ST_GEOTILE_TO_STRING to convert
+    the result to a string.  These functions are related to the `geo_grid`
+    query and the `geotile_grid` aggregation.
+
+    :param geometry: Expression of type `geo_point`. If `null`, the function
+                     returns `null`.
+    :param precision: Expression of type `integer`. If `null`, the function
+                      returns `null`. Valid values are between 0 and 29.
+    :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of
+                   type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any
+                   other type.
+    """
+    if bounds is not None:
+        return InstrumentedExpression(
+            f"ST_GEOTILE({_render(geometry)}, {_render(precision)}, {_render(bounds)})"
+        )
+    else:
+        return InstrumentedExpression(
+            f"ST_GEOTILE({_render(geometry)}, {_render(precision)})"
+        )
+
+
+def st_geotile_to_long(grid_id: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value representing a geotile grid-ID in string format
+    into a long.
+
+    :param grid_id: Input geotile grid-id. The input can be a single- or
+                    multi-valued column or an expression.
+    """
+    return InstrumentedExpression(f"ST_GEOTILE_TO_LONG({_render(grid_id)})")
+
+
+def st_geotile_to_string(grid_id: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value representing a geotile grid-ID in long format
+    into a string.
+
+    :param grid_id: Input geotile grid-id. The input can be a single- or
+                    multi-valued column or an expression.
+    """
+    return InstrumentedExpression(f"ST_GEOTILE_TO_STRING({_render(grid_id)})")
+
+
+def st_intersects(
+    geom_a: ExpressionType, geom_b: ExpressionType
+) -> InstrumentedExpression:
+    """Returns true if two geometries intersect. They intersect if they have
+    any point in common, including their interior points (points along lines or
+    within polygons). This is the inverse of the ST_DISJOINT function. In
+    mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅
+
+    :param geom_a: Expression of type `geo_point`, `cartesian_point`,
+                   `geo_shape` or `cartesian_shape`. If `null`, the function returns
+                   `null`.
+    :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape`
+                   or `cartesian_shape`. If `null`, the function returns `null`. The
+                   second parameter must also have the same coordinate system as the
+                   first. This means it is not possible to combine `geo_*` and
+                   `cartesian_*` parameters.
+    """
+    return InstrumentedExpression(
+        f"ST_INTERSECTS({_render(geom_a)}, {_render(geom_b)})"
+    )
+
+
+def st_within(geom_a: ExpressionType, geom_b: ExpressionType) -> InstrumentedExpression:
+    """Returns whether the first geometry is within the second geometry. This
+    is the inverse of the ST_CONTAINS function.
+
+    :param geom_a: Expression of type `geo_point`, `cartesian_point`,
+                   `geo_shape` or `cartesian_shape`. If `null`, the function returns
+                   `null`.
+    :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape`
+                   or `cartesian_shape`. If `null`, the function returns `null`. The
+                   second parameter must also have the same coordinate system as the
+                   first. This means it is not possible to combine `geo_*` and
+                   `cartesian_*` parameters.
+    """
+    return InstrumentedExpression(f"ST_WITHIN({_render(geom_a)}, {_render(geom_b)})")
+
+
+def st_x(point: ExpressionType) -> InstrumentedExpression:
+    """Extracts the `x` coordinate from the supplied point. If the points is of
+    type `geo_point` this is equivalent to extracting the `longitude` value.
+
+    :param point: Expression of type `geo_point` or `cartesian_point`. If
+                  `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ST_X({_render(point)})")
+
+
+def st_xmax(point: ExpressionType) -> InstrumentedExpression:
+    """Extracts the maximum value of the `x` coordinates from the supplied
+    geometry. If the geometry is of type `geo_point` or `geo_shape` this is
+    equivalent to extracting the maximum `longitude` value.
+
+    :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point`
+                  or `cartesian_shape`. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ST_XMAX({_render(point)})")
+
+
+def st_xmin(point: ExpressionType) -> InstrumentedExpression:
+    """Extracts the minimum value of the `x` coordinates from the supplied
+    geometry. If the geometry is of type `geo_point` or `geo_shape` this is
+    equivalent to extracting the minimum `longitude` value.
+
+    :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point`
+                  or `cartesian_shape`. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ST_XMIN({_render(point)})")
+
+
+def st_y(point: ExpressionType) -> InstrumentedExpression:
+    """Extracts the `y` coordinate from the supplied point. If the points is of
+    type `geo_point` this is equivalent to extracting the `latitude` value.
+
+    :param point: Expression of type `geo_point` or `cartesian_point`. If
+                  `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ST_Y({_render(point)})")
+
+
+def st_ymax(point: ExpressionType) -> InstrumentedExpression:
+    """Extracts the maximum value of the `y` coordinates from the supplied
+    geometry. If the geometry is of type `geo_point` or `geo_shape` this is
+    equivalent to extracting the maximum `latitude` value.
+
+    :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point`
+                  or `cartesian_shape`. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ST_YMAX({_render(point)})")
+
+
+def st_ymin(point: ExpressionType) -> InstrumentedExpression:
+    """Extracts the minimum value of the `y` coordinates from the supplied
+    geometry. If the geometry is of type `geo_point` or `geo_shape` this is
+    equivalent to extracting the minimum `latitude` value.
+
+    :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point`
+                  or `cartesian_shape`. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"ST_YMIN({_render(point)})")
+
+
+def substring(
+    string: ExpressionType, start: ExpressionType, length: ExpressionType = None
+) -> InstrumentedExpression:
+    """Returns a substring of a string, specified by a start position and an
+    optional length.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    :param start: Start position.
+    :param length: Length of the substring from the start position. Optional; if
+                   omitted, all positions after `start` are returned.
+    """
+    if length is not None:
+        return InstrumentedExpression(
+            f"SUBSTRING({_render(string)}, {_render(start)}, {_render(length)})"
+        )
+    else:
+        return InstrumentedExpression(f"SUBSTRING({_render(string)}, {_render(start)})")
+
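+# Illustrative usage sketch (hypothetical string column `name`): substring(name, 1, 3)
+# takes the first three characters (string positions start at 1), and
+# substring(name, 2) returns everything from the second character onward.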
+
+def sum(number: ExpressionType) -> InstrumentedExpression:
+    """The sum of a numeric expression.
+
+    :param number:
+    """
+    return InstrumentedExpression(f"SUM({_render(number)})")
+
+
+def sum_over_time(field: ExpressionType) -> InstrumentedExpression:
+    """Calculates the sum over time value of a field."""
+    return InstrumentedExpression(f"SUM({_render(field)})")
+
+
+def tan(angle: ExpressionType) -> InstrumentedExpression:
+    """Returns the tangent of an angle.
+
+    :param angle: An angle, in radians. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"TAN({_render(angle)})")
+
+
+def tanh(number: ExpressionType) -> InstrumentedExpression:
+    """Returns the hyperbolic tangent of a number.
+
+    :param number: Numeric expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"TANH({_render(number)})")
+
+
+def tau() -> InstrumentedExpression:
+    """Returns the ratio of a circle’s circumference to its radius."""
+    return InstrumentedExpression("TAU()")
+
+
+def term(field: ExpressionType, query: ExpressionType) -> InstrumentedExpression:
+    """Performs a Term query on the specified field. Returns true if the
+    provided term matches the row.
+
+    :param field: Field that the query will target.
+    :param query: Term you wish to find in the provided field.
+    """
+    return InstrumentedExpression(f"TERM({_render(field)}, {_render(query)})")
+
+
+def text_embedding(
+    text: ExpressionType, inference_id: ExpressionType
+) -> InstrumentedExpression:
+    """Generates dense vector embeddings from text input using a specified inference endpoint.
+    Use this function to generate query vectors for KNN searches against your vectorized data
+    or other dense-vector-based operations."""
+    return InstrumentedExpression(
+        f"TEXT_EMBEDDING({_render(text)}, {_render(inference_id)})"
+    )
+
+
+def top(
+    field: ExpressionType, limit: ExpressionType, order: ExpressionType
+) -> InstrumentedExpression:
+    """Collects the top values for a field. Includes repeated values.
+
+    :param field: The field to collect the top values for.
+    :param limit: The maximum number of values to collect.
+    :param order: The order to calculate the top values. Either `asc` or `desc`.
+    """
+    return InstrumentedExpression(
+        f"TOP({_render(field)}, {_render(limit)}, {_render(order)})"
+    )
+
+
+def to_aggregate_metric_double(number: ExpressionType) -> InstrumentedExpression:
+    """Encode a numeric to an aggregate_metric_double.
+
+    :param number: Input value. The input can be a single- or multi-valued
+                   column or an expression.
+    """
+    return InstrumentedExpression(f"TO_AGGREGATE_METRIC_DOUBLE({_render(number)})")
+
+
+def to_base64(string: ExpressionType) -> InstrumentedExpression:
+    """Encode a string to a base64 string.
+
+    :param string: A string.
+    """
+    return InstrumentedExpression(f"TO_BASE64({_render(string)})")
+
+
+def to_boolean(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a boolean value. A string value of `true`
+    will be case-insensitive converted to the Boolean `true`. For anything
+    else, including the empty string, the function will return `false`. The
+    numerical value of `0` will be converted to `false`, anything else will be
+    converted to `true`.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_BOOLEAN({_render(field)})")
+
+
+def to_cartesianpoint(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a `cartesian_point` value. A string will only
+    be successfully converted if it respects the WKT Point format.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_CARTESIANPOINT({_render(field)})")
+
+
+def to_cartesianshape(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a `cartesian_shape` value. A string will only
+    be successfully converted if it respects the WKT format.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_CARTESIANSHAPE({_render(field)})")
+
+
+def to_dateperiod(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value into a `date_period` value.
+
+    :param field: Input value. The input is a valid constant date period expression.
+    """
+    return InstrumentedExpression(f"TO_DATEPERIOD({_render(field)})")
+
+
+def to_datetime(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a date value. A string will only be
+    successfully converted if it respects the format
+    `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use `DATE_PARSE`.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_DATETIME({_render(field)})")
+
+
+def to_date_nanos(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input to a nanosecond-resolution date value (aka date_nanos).
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_DATE_NANOS({_render(field)})")
+
+
+def to_degrees(number: ExpressionType) -> InstrumentedExpression:
+    """Converts a number in radians to degrees).
+
+    :param number: Input value. The input can be a single- or multi-valued
+                   column or an expression.
+    """
+    return InstrumentedExpression(f"TO_DEGREES({_render(number)})")
+
+
+def to_double(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a double value. If the input parameter is of
+    a date type, its value will be interpreted as milliseconds since the Unix
+    epoch, converted to double. Boolean `true` will be converted to double
+    `1.0`, `false` to `0.0`.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_DOUBLE({_render(field)})")
+
+
+def to_geohash(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a geohash value. A string will only be successfully
+    converted if it respects the geohash format, as described for the geohash grid
+    aggregation.
+    """
+    return InstrumentedExpression(f"TO_GEOHASH({_render(field)})")
+
+
+def to_geohex(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a geohex value. A string will only be successfully
+    converted if it respects the geohex format, as described for the geohex grid
+    aggregation.
+    """
+    return InstrumentedExpression(f"TO_GEOHEX({_render(field)})")
+
+
+def to_geopoint(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a `geo_point` value. A string will only be
+    successfully converted if it respects the WKT Point format.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_GEOPOINT({_render(field)})")
+
+
+def to_geoshape(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a `geo_shape` value. A string will only be
+    successfully converted if it respects the WKT format.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_GEOSHAPE({_render(field)})")
+
+
+def to_geotile(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a geotile value. A string will only be successfully
+    converted if it respects the geotile format, as described for the geotile grid
+    aggregation.
+    """
+    return InstrumentedExpression(f"TO_GEOTILE({_render(field)})")
+
+
+def to_integer(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to an integer value. If the input parameter is
+    of a date type, its value will be interpreted as milliseconds since the
+    Unix epoch, converted to integer. Boolean `true` will be converted to
+    integer `1`, `false` to `0`.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_INTEGER({_render(field)})")
+
+
+def to_ip(
+    field: ExpressionType, options: ExpressionType = None
+) -> InstrumentedExpression:
+    """Converts an input string to an IP value.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    :param options: (Optional) Additional options.
+    """
+    if options is not None:
+        return InstrumentedExpression(f"TO_IP({_render(field)}, {_render(options)})")
+    else:
+        return InstrumentedExpression(f"TO_IP({_render(field)})")
+
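+# Illustrative usage sketch (hypothetical string column `client_address`):
+# to_ip(client_address) converts the column to the `ip` type; an optional `options`
+# expression, when given, is forwarded as a second argument to TO_IP.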
+
+def to_long(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to a long value. If the input parameter is of a
+    date type, its value will be interpreted as milliseconds since the Unix
+    epoch, converted to long. Boolean `true` will be converted to long `1`,
+    `false` to `0`.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_LONG({_render(field)})")
+
+
+def to_lower(str: ExpressionType) -> InstrumentedExpression:
+    """Returns a new string representing the input string converted to lower case.
+
+    :param str: String expression. If `null`, the function returns `null`. The
+                input can be a single-valued column or expression, or a multi-valued
+                column or expression.
+    """
+    return InstrumentedExpression(f"TO_LOWER({_render(str)})")
+
+
+def to_radians(number: ExpressionType) -> InstrumentedExpression:
+    """Converts a number in degrees) to radians.
+
+    :param number: Input value. The input can be a single- or multi-valued
+                   column or an expression.
+    """
+    return InstrumentedExpression(f"TO_RADIANS({_render(number)})")
+
+
+def to_string(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value into a string.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_STRING({_render(field)})")
+
+
+def to_timeduration(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value into a `time_duration` value.
+
+    :param field: Input value. The input is a valid constant time duration expression.
+    """
+    return InstrumentedExpression(f"TO_TIMEDURATION({_render(field)})")
+
+
+def to_unsigned_long(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input value to an unsigned long value. If the input
+    parameter is of a date type, its value will be interpreted as milliseconds
+    since the Unix epoch, converted to unsigned long. Boolean `true` will be
+    converted to unsigned long `1`, `false` to `0`.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_UNSIGNED_LONG({_render(field)})")
+
+
+def to_upper(str: ExpressionType) -> InstrumentedExpression:
+    """Returns a new string representing the input string converted to upper case.
+
+    :param str: String expression. If `null`, the function returns `null`. The
+                input can be a single-valued column or expression, or a multi-valued
+                column or expression.
+    """
+    return InstrumentedExpression(f"TO_UPPER({_render(str)})")
+
+
+def to_version(field: ExpressionType) -> InstrumentedExpression:
+    """Converts an input string to a version value.
+
+    :param field: Input value. The input can be a single- or multi-valued column
+                  or an expression.
+    """
+    return InstrumentedExpression(f"TO_VERSION({_render(field)})")
+
+
+def trim(string: ExpressionType) -> InstrumentedExpression:
+    """Removes leading and trailing whitespaces from a string.
+
+    :param string: String expression. If `null`, the function returns `null`.
+    """
+    return InstrumentedExpression(f"TRIM({_render(string)})")
+
+
+def values(field: ExpressionType) -> InstrumentedExpression:
+    """Returns unique values as a multivalued field. The order of the returned
+    values isn’t guaranteed. If you need the values returned in order use `MV_SORT`.
+
+    :param field: The input column or expression from which to collect unique values.
+    """
+    return InstrumentedExpression(f"VALUES({_render(field)})")
+
+
+def weighted_avg(
+    number: ExpressionType, weight: ExpressionType
+) -> InstrumentedExpression:
+    """The weighted average of a numeric expression.
+
+    :param number: A numeric value.
+    :param weight: A numeric weight.
+    """
+    return InstrumentedExpression(f"WEIGHTED_AVG({_render(number)}, {_render(weight)})")
diff -pruN 8.17.2-2/elasticsearch/exceptions.py 9.2.0-1/elasticsearch/exceptions.py
--- 8.17.2-2/elasticsearch/exceptions.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/exceptions.py	2025-10-28 16:50:53.000000000 +0000
@@ -61,6 +61,7 @@ class ApiError(_ApiError):
             if self.body and isinstance(self.body, dict) and "error" in self.body:
                 if isinstance(self.body["error"], dict):
                     root_cause = self.body["error"]["root_cause"][0]
+                    caused_by = self.body["error"].get("caused_by", {})
                     cause = ", ".join(
                         filter(
                             None,
@@ -68,6 +69,7 @@ class ApiError(_ApiError):
                                 repr(root_cause["reason"]),
                                 root_cause.get("resource.id"),
                                 root_cause.get("resource.type"),
+                                caused_by.get("reason"),
                             ],
                         )
                     )
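A small hedged sketch of what the change above surfaces: when the server response carries a top-level ``caused_by``, its ``reason`` is now included in the rendered error message. The local cluster URL and the deliberately invalid index name are assumptions used only for illustration.

    from elasticsearch import Elasticsearch
    from elasticsearch.exceptions import ApiError

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    try:
        # index names may not contain spaces, so the server rejects this request
        client.indices.create(index="not a valid index name")
    except ApiError as err:
        # the rendered message now also includes the nested caused_by reason,
        # when the server provides one
        print(err)
        if isinstance(err.body, dict):
            print(err.body.get("error", {}).get("caused_by", {}).get("reason"))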
diff -pruN 8.17.2-2/elasticsearch/helpers/__init__.py 9.2.0-1/elasticsearch/helpers/__init__.py
--- 8.17.2-2/elasticsearch/helpers/__init__.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/helpers/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -19,12 +19,21 @@ from .._async.helpers import async_bulk,
 from .._utils import fixup_module_metadata
 from .actions import _chunk_actions  # noqa: F401
 from .actions import _process_bulk_chunk  # noqa: F401
-from .actions import bulk, expand_action, parallel_bulk, reindex, scan, streaming_bulk
+from .actions import (
+    BULK_FLUSH,
+    bulk,
+    expand_action,
+    parallel_bulk,
+    reindex,
+    scan,
+    streaming_bulk,
+)
 from .errors import BulkIndexError, ScanError
 
 __all__ = [
     "BulkIndexError",
     "ScanError",
+    "BULK_FLUSH",
     "expand_action",
     "streaming_bulk",
     "bulk",
diff -pruN 8.17.2-2/elasticsearch/helpers/actions.py 9.2.0-1/elasticsearch/helpers/actions.py
--- 8.17.2-2/elasticsearch/helpers/actions.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/helpers/actions.py	2025-10-28 16:50:53.000000000 +0000
@@ -16,9 +16,10 @@
 #  under the License.
 
 import logging
+import queue
 import time
+from enum import Enum
 from operator import methodcaller
-from queue import Queue
 from typing import (
     Any,
     Callable,
@@ -37,13 +38,21 @@ from typing import (
 from elastic_transport import OpenTelemetrySpan
 
 from .. import Elasticsearch
-from ..compat import to_bytes
+from ..compat import safe_thread, to_bytes
 from ..exceptions import ApiError, NotFoundError, TransportError
 from ..serializer import Serializer
 from .errors import BulkIndexError, ScanError
 
 logger = logging.getLogger("elasticsearch.helpers")
 
+
+class BulkMeta(Enum):
+    flush = 1
+    done = 2
+
+
+BULK_FLUSH = BulkMeta.flush
+
 _TYPE_BULK_ACTION = Union[bytes, str, Dict[str, Any]]
 _TYPE_BULK_ACTION_HEADER = Dict[str, Any]
 _TYPE_BULK_ACTION_BODY = Union[None, bytes, Dict[str, Any]]
@@ -51,6 +60,13 @@ _TYPE_BULK_ACTION_HEADER_AND_BODY = Tupl
     _TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY
 ]
 
+_TYPE_BULK_ACTION_WITH_META = Union[bytes, str, Dict[str, Any], BulkMeta]
+_TYPE_BULK_ACTION_HEADER_WITH_META = Union[Dict[str, Any], BulkMeta]
+_TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY = Union[
+    Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
+    Tuple[BulkMeta, Any],
+]
+
 
 def expand_action(data: _TYPE_BULK_ACTION) -> _TYPE_BULK_ACTION_HEADER_AND_BODY:
     """
@@ -139,7 +155,9 @@ class _ActionChunker:
         ] = []
 
     def feed(
-        self, action: _TYPE_BULK_ACTION_HEADER, data: _TYPE_BULK_ACTION_BODY
+        self,
+        action: _TYPE_BULK_ACTION_HEADER_WITH_META,
+        data: _TYPE_BULK_ACTION_BODY,
     ) -> Optional[
         Tuple[
             List[
@@ -152,23 +170,25 @@ class _ActionChunker:
         ]
     ]:
         ret = None
-        raw_action = action
-        raw_data = data
-        action_bytes = to_bytes(self.serializer.dumps(action), "utf-8")
-        # +1 to account for the trailing new line character
-        cur_size = len(action_bytes) + 1
-
-        data_bytes: Optional[bytes]
-        if data is not None:
-            data_bytes = to_bytes(self.serializer.dumps(data), "utf-8")
-            cur_size += len(data_bytes) + 1
-        else:
-            data_bytes = None
+        action_bytes = b""
+        data_bytes: Optional[bytes] = None
+        cur_size = 0
+        if not isinstance(action, BulkMeta):
+            action_bytes = to_bytes(self.serializer.dumps(action), "utf-8")
+            # +1 to account for the trailing new line character
+            cur_size = len(action_bytes) + 1
+
+            if data is not None:
+                data_bytes = to_bytes(self.serializer.dumps(data), "utf-8")
+                cur_size += len(data_bytes) + 1
+            else:
+                data_bytes = None
 
         # full chunk, send it and start a new one
         if self.bulk_actions and (
             self.size + cur_size > self.max_chunk_bytes
             or self.action_count == self.chunk_size
+            or (action == BulkMeta.flush and self.bulk_actions)
         ):
             ret = (self.bulk_data, self.bulk_actions)
             self.bulk_actions = []
@@ -176,15 +196,16 @@ class _ActionChunker:
             self.size = 0
             self.action_count = 0
 
-        self.bulk_actions.append(action_bytes)
-        if data_bytes is not None:
-            self.bulk_actions.append(data_bytes)
-            self.bulk_data.append((raw_action, raw_data))
-        else:
-            self.bulk_data.append((raw_action,))
+        if not isinstance(action, BulkMeta):
+            self.bulk_actions.append(action_bytes)
+            if data_bytes is not None:
+                self.bulk_actions.append(data_bytes)
+                self.bulk_data.append((action, data))
+            else:
+                self.bulk_data.append((action,))
 
-        self.size += cur_size
-        self.action_count += 1
+            self.size += cur_size
+            self.action_count += 1
         return ret
 
     def flush(
@@ -209,9 +230,10 @@ class _ActionChunker:
 
 
 def _chunk_actions(
-    actions: Iterable[_TYPE_BULK_ACTION_HEADER_AND_BODY],
+    actions: Iterable[_TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY],
     chunk_size: int,
     max_chunk_bytes: int,
+    flush_after_seconds: Optional[float],
     serializer: Serializer,
 ) -> Iterable[
     Tuple[
@@ -231,10 +253,41 @@ def _chunk_actions(
     chunker = _ActionChunker(
         chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer
     )
-    for action, data in actions:
-        ret = chunker.feed(action, data)
-        if ret:
-            yield ret
+
+    if not flush_after_seconds:
+        for action, data in actions:
+            ret = chunker.feed(action, data)
+            if ret:
+                yield ret
+    else:
+        item_queue: queue.Queue[_TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY] = (
+            queue.Queue(maxsize=1)
+        )
+
+        def get_items() -> None:
+            try:
+                for item in actions:
+                    item_queue.put(item)
+            finally:
+                # make sure we signal the end even if there is an exception
+                item_queue.put((BulkMeta.done, None))
+
+        with safe_thread(get_items):
+            timeout: Optional[float] = flush_after_seconds
+            while True:
+                try:
+                    action, data = item_queue.get(timeout=timeout)
+                    timeout = flush_after_seconds
+                except queue.Empty:
+                    action, data = BulkMeta.flush, None
+                    timeout = None
+
+                if action is BulkMeta.done:
+                    break
+                ret = chunker.feed(action, data)
+                if ret:
+                    yield ret
+
     ret = chunker.flush()
     if ret:
         yield ret
@@ -361,9 +414,10 @@ def _process_bulk_chunk(
 
 def streaming_bulk(
     client: Elasticsearch,
-    actions: Iterable[_TYPE_BULK_ACTION],
+    actions: Iterable[_TYPE_BULK_ACTION_WITH_META],
     chunk_size: int = 500,
     max_chunk_bytes: int = 100 * 1024 * 1024,
+    flush_after_seconds: Optional[float] = None,
     raise_on_error: bool = True,
     expand_action_callback: Callable[
         [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY
@@ -397,6 +451,9 @@ def streaming_bulk(
     :arg actions: iterable containing the actions to be executed
     :arg chunk_size: number of docs in one chunk sent to es (default: 500)
     :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg flush_after_seconds: time in seconds after which a chunk is written even
+        if it hasn't reached `chunk_size` or `max_chunk_bytes`. Set to 0 or None to
+        disable the timeout-based flush. (default: None)
     :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
         from the execution of the last chunk when some occur. By default we raise.
     :arg raise_on_exception: if ``False`` then don't propagate exceptions from
@@ -425,6 +482,13 @@ def streaming_bulk(
 
         serializer = client.transport.serializers.get_serializer("application/json")
 
+        def expand_action_with_meta(
+            data: _TYPE_BULK_ACTION_WITH_META,
+        ) -> _TYPE_BULK_ACTION_HEADER_WITH_META_AND_BODY:
+            if isinstance(data, BulkMeta):
+                return data, None
+            return expand_action_callback(data)
+
         bulk_data: List[
             Union[
                 Tuple[_TYPE_BULK_ACTION_HEADER],
@@ -433,9 +497,10 @@ def streaming_bulk(
         ]
         bulk_actions: List[bytes]
         for bulk_data, bulk_actions in _chunk_actions(
-            map(expand_action_callback, actions),
+            map(expand_action_with_meta, actions),
             chunk_size,
             max_chunk_bytes,
+            flush_after_seconds,
             serializer,
         ):
             for attempt in range(max_retries + 1):
@@ -557,6 +622,7 @@ def parallel_bulk(
     thread_count: int = 4,
     chunk_size: int = 500,
     max_chunk_bytes: int = 100 * 1024 * 1024,
+    flush_after_seconds: Optional[float] = None,
     queue_size: int = 4,
     expand_action_callback: Callable[
         [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY
@@ -573,6 +639,9 @@ def parallel_bulk(
     :arg thread_count: size of the threadpool to use for the bulk requests
     :arg chunk_size: number of docs in one chunk sent to es (default: 500)
     :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg flush_after_seconds: time in seconds after which a chunk is written even
+        if it hasn't reached `chunk_size` or `max_chunk_bytes`. Set to 0 or None to
+        disable the timeout-based flush. (default: None)
     :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
         from the execution of the last chunk when some occur. By default we raise.
     :arg raise_on_exception: if ``False`` then don't propagate exceptions from
@@ -596,7 +665,7 @@ def parallel_bulk(
             super()._setup_queues()  # type: ignore[misc]
             # The queue must be at least the size of the number of threads to
             # prevent hanging when inserting sentinel values during teardown.
-            self._inqueue: Queue[
+            self._inqueue: queue.Queue[
                 Tuple[
                     List[
                         Union[
@@ -605,7 +674,7 @@ def parallel_bulk(
                     ],
                     List[bytes],
                 ]
-            ] = Queue(max(queue_size, thread_count))
+            ] = queue.Queue(max(queue_size, thread_count))
             self._quick_put = self._inqueue.put
 
     with client._otel.helpers_span("helpers.parallel_bulk") as otel_span:
@@ -625,7 +694,11 @@ def parallel_bulk(
                     )
                 ),
                 _chunk_actions(
-                    expanded_actions, chunk_size, max_chunk_bytes, serializer
+                    expanded_actions,
+                    chunk_size,
+                    max_chunk_bytes,
+                    flush_after_seconds,
+                    serializer,
                 ),
             ):
                 yield from result
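A minimal sketch of the two new flushing mechanisms added above, assuming a local cluster and an index named ``my-index`` (both placeholders): ``flush_after_seconds`` sends a partially filled chunk after a period of inactivity, and yielding ``BULK_FLUSH`` from the action iterator forces whatever is currently buffered to be sent immediately.

    import time

    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import BULK_FLUSH, streaming_bulk

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    def slow_actions():
        # a generator that produces documents more slowly than they are buffered
        for i in range(10):
            yield {"_index": "my-index", "_id": i, "value": i}
            if i == 4:
                # explicitly flush whatever has been buffered so far
                yield BULK_FLUSH
            time.sleep(0.5)

    # with flush_after_seconds set, a chunk that is neither full by count nor by
    # size is still sent once no new action has arrived for two seconds
    for ok, item in streaming_bulk(client, slow_actions(), flush_after_seconds=2.0):
        print(ok, item)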
diff -pruN 8.17.2-2/elasticsearch/helpers/vectorstore/_async/strategies.py 9.2.0-1/elasticsearch/helpers/vectorstore/_async/strategies.py
--- 8.17.2-2/elasticsearch/helpers/vectorstore/_async/strategies.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/helpers/vectorstore/_async/strategies.py	2025-10-28 16:50:53.000000000 +0000
@@ -96,7 +96,7 @@ class AsyncRetrievalStrategy(ABC):
 
 
 class AsyncSparseVectorStrategy(AsyncRetrievalStrategy):
-    """Sparse retrieval strategy using the `text_expansion` processor."""
+    """Sparse retrieval strategy using the `sparse_vector` processor."""
 
     def __init__(self, model_id: str = ".elser_model_2"):
         self.model_id = model_id
@@ -127,11 +127,10 @@ class AsyncSparseVectorStrategy(AsyncRet
                 "bool": {
                     "must": [
                         {
-                            "text_expansion": {
-                                f"{vector_field}.{self._tokens_field}": {
-                                    "model_id": self.model_id,
-                                    "model_text": query,
-                                }
+                            "sparse_vector": {
+                                "field": f"{vector_field}.{self._tokens_field}",
+                                "inference_id": self.model_id,
+                                "query": query,
                             }
                         }
                     ],
@@ -150,7 +149,7 @@ class AsyncSparseVectorStrategy(AsyncRet
         mappings: Dict[str, Any] = {
             "properties": {
                 vector_field: {
-                    "properties": {self._tokens_field: {"type": "rank_features"}}
+                    "properties": {self._tokens_field: {"type": "sparse_vector"}}
                 }
             }
         }
@@ -172,11 +171,12 @@ class AsyncSparseVectorStrategy(AsyncRet
                     {
                         "inference": {
                             "model_id": self.model_id,
-                            "target_field": vector_field,
-                            "field_map": {text_field: "text_field"},
-                            "inference_config": {
-                                "text_expansion": {"results_field": self._tokens_field}
-                            },
+                            "input_output": [
+                                {
+                                    "input_field": text_field,
+                                    "output_field": f"{vector_field}.{self._tokens_field}",
+                                },
+                            ],
                         }
                     }
                 ],
diff -pruN 8.17.2-2/elasticsearch/helpers/vectorstore/_sync/strategies.py 9.2.0-1/elasticsearch/helpers/vectorstore/_sync/strategies.py
--- 8.17.2-2/elasticsearch/helpers/vectorstore/_sync/strategies.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/helpers/vectorstore/_sync/strategies.py	2025-10-28 16:50:53.000000000 +0000
@@ -96,7 +96,7 @@ class RetrievalStrategy(ABC):
 
 
 class SparseVectorStrategy(RetrievalStrategy):
-    """Sparse retrieval strategy using the `text_expansion` processor."""
+    """Sparse retrieval strategy using the `sparse_vector` processor."""
 
     def __init__(self, model_id: str = ".elser_model_2"):
         self.model_id = model_id
@@ -127,11 +127,10 @@ class SparseVectorStrategy(RetrievalStra
                 "bool": {
                     "must": [
                         {
-                            "text_expansion": {
-                                f"{vector_field}.{self._tokens_field}": {
-                                    "model_id": self.model_id,
-                                    "model_text": query,
-                                }
+                            "sparse_vector": {
+                                "field": f"{vector_field}.{self._tokens_field}",
+                                "inference_id": self.model_id,
+                                "query": query,
                             }
                         }
                     ],
@@ -150,7 +149,7 @@ class SparseVectorStrategy(RetrievalStra
         mappings: Dict[str, Any] = {
             "properties": {
                 vector_field: {
-                    "properties": {self._tokens_field: {"type": "rank_features"}}
+                    "properties": {self._tokens_field: {"type": "sparse_vector"}}
                 }
             }
         }
@@ -172,11 +171,12 @@ class SparseVectorStrategy(RetrievalStra
                     {
                         "inference": {
                             "model_id": self.model_id,
-                            "target_field": vector_field,
-                            "field_map": {text_field: "text_field"},
-                            "inference_config": {
-                                "text_expansion": {"results_field": self._tokens_field}
-                            },
+                            "input_output": [
+                                {
+                                    "input_field": text_field,
+                                    "output_field": f"{vector_field}.{self._tokens_field}",
+                                },
+                            ],
                         }
                     }
                 ],
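For reference, a hand-written search of the same shape the updated strategies now issue, replacing the former ``text_expansion`` query; the index name and token field path are placeholders, and ``.elser_model_2`` is the strategies' default model id.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.search(
        index="my-vectorstore-index",  # placeholder index created by the strategy
        query={
            "sparse_vector": {
                "field": "vector_field.tokens",  # placeholder tokens field path
                "inference_id": ".elser_model_2",
                "query": "how do I reset my password?",
            }
        },
    )
    print(resp["hits"]["total"])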
diff -pruN 8.17.2-2/elasticsearch/transport.py 9.2.0-1/elasticsearch/transport.py
--- 8.17.2-2/elasticsearch/transport.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/elasticsearch/transport.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,57 +0,0 @@
-#  Licensed to Elasticsearch B.V. under one or more contributor
-#  license agreements. See the NOTICE file distributed with
-#  this work for additional information regarding copyright
-#  ownership. Elasticsearch B.V. licenses this file to you under
-#  the Apache License, Version 2.0 (the "License"); you may
-#  not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-# 	http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing,
-#  software distributed under the License is distributed on an
-#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-#  KIND, either express or implied.  See the License for the
-#  specific language governing permissions and limitations
-#  under the License.
-
-import warnings
-from typing import Any, Dict, Optional, Union
-
-from elastic_transport import AsyncTransport, Transport  # noqa: F401
-
-# This file exists for backwards compatibility.
-warnings.warn(
-    "Importing from the 'elasticsearch.transport' module is deprecated. "
-    "Instead import from 'elastic_transport'",
-    category=DeprecationWarning,
-    stacklevel=2,
-)
-
-
-def get_host_info(
-    node_info: Dict[str, Any], host: Dict[str, Union[int, str]]
-) -> Optional[Dict[str, Union[int, str]]]:
-    """
-    Simple callback that takes the node info from `/_cluster/nodes` and a
-    parsed connection information and return the connection information. If
-    `None` is returned this node will be skipped.
-    Useful for filtering nodes (by proximity for example) or if additional
-    information needs to be provided for the :class:`~elasticsearch.Connection`
-    class. By default master only nodes are filtered out since they shouldn't
-    typically be used for API operations.
-    :arg node_info: node information from `/_cluster/nodes`
-    :arg host: connection information (host, port) extracted from the node info
-    """
-
-    warnings.warn(
-        "The 'get_host_info' function is deprecated. Instead "
-        "use the 'sniff_node_callback' parameter on the client",
-        category=DeprecationWarning,
-        stacklevel=2,
-    )
-
-    # ignore master only nodes
-    if node_info.get("roles", []) == ["master"]:
-        return None
-    return host
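With the compatibility shim removed, the import it used to forward is the one to use directly, as the deleted module's own deprecation warning advised:

    # 8.x (emitted a DeprecationWarning):
    #   from elasticsearch.transport import AsyncTransport, Transport

    # 9.x:
    from elastic_transport import AsyncTransport, Transport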
diff -pruN 8.17.2-2/examples/dsl/README.rst 9.2.0-1/examples/dsl/README.rst
--- 8.17.2-2/examples/dsl/README.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/README.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,47 @@
+Elasticsearch DSL Examples
+==========================
+
+In this directory you can see several complete examples demonstrating key
+concepts and patterns exposed by ``elasticsearch-dsl``.
+
+``alias_migration.py``
+----------------------
+
+The alias migration example shows a useful pattern where we use versioned
+indices (``test-blog-0``, ``test-blog-1``, ...) to manage schema changes and
+hide them behind an alias so that the application doesn't have to be aware of
+the versions and can just refer to the ``test-blog`` alias for both read and
+write operations.
+
+For simplicity we use a timestamp as the version in the index name.
+
+``parent_child.py``
+-------------------
+
+A more complex example highlighting the possible relationships available in
+elasticsearch - `parent/child
+<https://www.elastic.co/guide/en/elasticsearch/reference/6.3/nested.html>`_ and
+`nested
+<https://www.elastic.co/guide/en/elasticsearch/reference/6.3/nested.html>`_.
+
+``composite_agg.py``
+--------------------
+
+A helper function using the `composite aggregation
+<https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html>`_
+to paginate over aggregation results.
+
+``percolate.py``
+----------------
+
+A ``BlogPost`` document with automatic classification using the `percolator
+<https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-percolate-query.html>`_
+functionality.
+
+``completion.py``
+-----------------
+
+An example using the `completion suggester
+<https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters-completion.html>`_
+to auto complete people's names.
+
diff -pruN 8.17.2-2/examples/dsl/alias_migration.py 9.2.0-1/examples/dsl/alias_migration.py
--- 8.17.2-2/examples/dsl/alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,161 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Simple example with a single Document demonstrating how schema can be managed,
+including upgrading with reindexing.
+
+Key concepts:
+
+    * setup() function to first initialize the schema (as index template) in
+      elasticsearch. Can be called any time (recommended with every deploy of
+      your app).
+
+    * migrate() function to be called any time when the schema changes - it
+      will create a new index (by incrementing the version) and update the alias.
+      By default it will also (before flipping the alias) move the data from the
+      previous index to the new one.
+
+    * BlogPost._matches() class method is required for this code to work:
+      without it, BlogPost would not be used to deserialize the documents,
+      since their index is set to the concrete index whereas the class refers
+      to the alias.
+"""
+import os
+from datetime import datetime
+from fnmatch import fnmatch
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from elasticsearch.dsl import Document, Keyword, connections, mapped_field
+
+ALIAS = "test-blog"
+PATTERN = ALIAS + "-*"
+PRIORITY = 100
+
+
+class BlogPost(Document):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    title: str
+    tags: List[str] = mapped_field(Keyword())
+    content: str
+    published: Optional[datetime] = mapped_field(default=None)
+
+    def is_published(self) -> bool:
+        return bool(self.published and datetime.now() > self.published)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # override _matches to match indices in a pattern instead of just ALIAS
+        # hit is the raw dict as returned by elasticsearch
+        return fnmatch(hit["_index"], PATTERN)
+
+    class Index:
+        # we will use an alias instead of the index
+        name = ALIAS
+        # set settings and possibly other attributes of the index like
+        # analyzers
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def setup() -> None:
+    """
+    Create the index template in elasticsearch specifying the mappings and any
+    settings to be used. This can be run at any time, ideally at every new code
+    deploy.
+    """
+    # create an index template
+    index_template = BlogPost._index.as_composable_template(
+        ALIAS, PATTERN, priority=PRIORITY
+    )
+    # upload the template into elasticsearch
+    # potentially overriding the one already there
+    index_template.save()
+
+    # create the first index if it doesn't exist
+    if not BlogPost._index.exists():
+        migrate(move_data=False)
+
+
+def migrate(move_data: bool = True, update_alias: bool = True) -> None:
+    """
+    Upgrade function that creates a new index for the data. Optionally it also can
+    (and by default will) reindex previous copy of the data into the new index
+    (specify ``move_data=False`` to skip this step) and update the alias to
+    point to the latest index (set ``update_alias=False`` to skip).
+
+    Note that while this function is running the application can still perform
+    any and all searches without any loss of functionality. It should, however,
+    not perform any writes at this time as those might be lost.
+    """
+    # construct a new index name by appending current timestamp
+    next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f"))
+
+    # get the low level connection
+    es = connections.get_connection()
+
+    # create new index, it will use the settings from the template
+    es.indices.create(index=next_index)
+
+    if move_data:
+        # move data from current alias to the new index
+        es.options(request_timeout=3600).reindex(
+            body={"source": {"index": ALIAS}, "dest": {"index": next_index}}
+        )
+        # refresh the index to make the changes visible
+        es.indices.refresh(index=next_index)
+
+    if update_alias:
+        # repoint the alias to point to the newly created index
+        es.indices.update_aliases(
+            body={
+                "actions": [
+                    {"remove": {"alias": ALIAS, "index": PATTERN}},
+                    {"add": {"alias": ALIAS, "index": next_index}},
+                ]
+            }
+        )
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    setup()
+
+    # create a new document
+    bp = BlogPost(
+        _id=0,
+        title="Hello World!",
+        tags=["testing", "dummy"],
+        content=open(__file__).read(),
+    )
+    bp.save(refresh=True)
+
+    # create new index
+    migrate()
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/async/alias_migration.py 9.2.0-1/examples/dsl/async/alias_migration.py
--- 8.17.2-2/examples/dsl/async/alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,162 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Simple example with a single Document demonstrating how schema can be managed,
+including upgrading with reindexing.
+
+Key concepts:
+
+    * setup() function to first initialize the schema (as index template) in
+      elasticsearch. Can be called any time (recommended with every deploy of
+      your app).
+
+    * migrate() function to be called any time when the schema changes - it
+      will create a new index (by incrementing the version) and update the alias.
+      By default it will also (before flipping the alias) move the data from the
+      previous index to the new one.
+
+    * BlogPost._matches() class method is required for this code to work:
+      without it, BlogPost would not be used to deserialize the documents,
+      since their index is set to the concrete index whereas the class refers
+      to the alias.
+"""
+import asyncio
+import os
+from datetime import datetime
+from fnmatch import fnmatch
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from elasticsearch.dsl import AsyncDocument, Keyword, async_connections, mapped_field
+
+ALIAS = "test-blog"
+PATTERN = ALIAS + "-*"
+PRIORITY = 100
+
+
+class BlogPost(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    title: str
+    tags: List[str] = mapped_field(Keyword())
+    content: str
+    published: Optional[datetime] = mapped_field(default=None)
+
+    def is_published(self) -> bool:
+        return bool(self.published and datetime.now() > self.published)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # override _matches to match indices in a pattern instead of just ALIAS
+        # hit is the raw dict as returned by elasticsearch
+        return fnmatch(hit["_index"], PATTERN)
+
+    class Index:
+        # we will use an alias instead of the index
+        name = ALIAS
+        # set settings and possibly other attributes of the index like
+        # analyzers
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    """
+    Create the index template in elasticsearch specifying the mappings and any
+    settings to be used. This can be run at any time, ideally at every new code
+    deploy.
+    """
+    # create an index template
+    index_template = BlogPost._index.as_composable_template(
+        ALIAS, PATTERN, priority=PRIORITY
+    )
+    # upload the template into elasticsearch
+    # potentially overriding the one already there
+    await index_template.save()
+
+    # create the first index if it doesn't exist
+    if not await BlogPost._index.exists():
+        await migrate(move_data=False)
+
+
+async def migrate(move_data: bool = True, update_alias: bool = True) -> None:
+    """
+    Upgrade function that creates a new index for the data. Optionally it also can
+    (and by default will) reindex the previous copy of the data into the new index
+    (specify ``move_data=False`` to skip this step) and update the alias to
+    point to the latest index (set ``update_alias=False`` to skip).
+
+    Note that while this function is running the application can still perform
+    any and all searches without any loss of functionality. It should, however,
+    not perform any writes at this time as those might be lost.
+    """
+    # construct a new index name by appending current timestamp
+    next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f"))
+
+    # get the low level connection
+    es = async_connections.get_connection()
+
+    # create new index, it will use the settings from the template
+    await es.indices.create(index=next_index)
+
+    if move_data:
+        # move data from current alias to the new index
+        await es.options(request_timeout=3600).reindex(
+            body={"source": {"index": ALIAS}, "dest": {"index": next_index}}
+        )
+        # refresh the index to make the changes visible
+        await es.indices.refresh(index=next_index)
+
+    if update_alias:
+        # repoint the alias to point to the newly created index
+        await es.indices.update_aliases(
+            body={
+                "actions": [
+                    {"remove": {"alias": ALIAS, "index": PATTERN}},
+                    {"add": {"alias": ALIAS, "index": next_index}},
+                ]
+            }
+        )
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await setup()
+
+    # create a new document
+    bp = BlogPost(
+        _id=0,
+        title="Hello World!",
+        tags=["testing", "dummy"],
+        content=open(__file__).read(),
+    )
+    await bp.save(refresh=True)
+
+    # create new index
+    await migrate()
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/completion.py 9.2.0-1/examples/dsl/async/completion.py
--- 8.17.2-2/examples/dsl/async/completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,114 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with completion suggester.
+
+In the ``Person`` class we index the person's name to allow auto completing in
+any order ("first last", "middle last first", ...). For the weight we use a
+value from the ``popularity`` field which is a long.
+
+To make the suggestions work in different languages we added a custom analyzer
+that does ascii folding.
+"""
+
+import asyncio
+import os
+from itertools import permutations
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    Completion,
+    Keyword,
+    Long,
+    Text,
+    analyzer,
+    async_connections,
+    mapped_field,
+    token_filter,
+)
+
+# custom analyzer for names
+ascii_fold = analyzer(
+    "ascii_fold",
+    # we don't want to split O'Brian or Toulouse-Lautrec
+    tokenizer="whitespace",
+    filter=["lowercase", token_filter("ascii_fold", "asciifolding")],
+)
+
+
+class Person(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+    popularity: int = mapped_field(Long(), default=0)
+
+    # completion field with a custom analyzer
+    suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False)
+
+    def clean(self) -> None:
+        """
+        Automatically construct the suggestion input and weight by taking all
+        possible permutations of Person's name as ``input`` and taking their
+        popularity as ``weight``.
+        """
+        self.suggest = {
+            "input": [" ".join(p) for p in permutations(self.name.split())],
+            "weight": self.popularity,
+        }
+
+    class Index:
+        name = "test-suggest"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await Person.init()
+
+    # index some sample data
+    for id, (name, popularity) in enumerate(
+        [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)]
+    ):
+        await Person(_id=id, name=name, popularity=popularity).save()
+
+    # refresh index manually to make changes live
+    await Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Jara Cimr", "tou", "de hen"):
+        s = Person.search()
+        s = s.suggest("auto_complete", text, completion={"field": "suggest"})
+        response = await s.execute()
+
+        # print out all the options we got
+        for option in response.suggest["auto_complete"][0].options:
+            print("%10s: %25s (%d)" % (text, option._source.name, option._score))
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/composite_agg.py 9.2.0-1/examples/dsl/async/composite_agg.py
--- 8.17.2-2/examples/dsl/async/composite_agg.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/composite_agg.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,93 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import asyncio
+import os
+from typing import Any, AsyncIterator, Dict, Mapping, Sequence, cast
+
+from elasticsearch.dsl import Agg, AsyncSearch, Response, aggs, async_connections
+from elasticsearch.dsl.types import CompositeAggregate
+from elasticsearch.helpers import async_bulk
+from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX
+
+
+async def scan_aggs(
+    search: AsyncSearch,
+    source_aggs: Sequence[Mapping[str, Agg]],
+    inner_aggs: Dict[str, Agg] = {},
+    size: int = 10,
+) -> AsyncIterator[CompositeAggregate]:
+    """
+    Helper function used to iterate over all possible bucket combinations of
+    ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
+    ``composite`` aggregation under the hood to perform this.
+    """
+
+    async def run_search(**kwargs: Any) -> Response:
+        s = search[:0]
+        bucket = s.aggs.bucket(
+            "comp",
+            aggs.Composite(
+                sources=source_aggs,
+                size=size,
+                **kwargs,
+            ),
+        )
+        for agg_name, agg in inner_aggs.items():
+            bucket[agg_name] = agg
+        return await s.execute()
+
+    response = await run_search()
+    while response.aggregations["comp"].buckets:
+        for b in response.aggregations["comp"].buckets:
+            yield cast(CompositeAggregate, b)
+        if "after_key" in response.aggregations["comp"]:
+            after = response.aggregations["comp"].after_key
+        else:
+            after = response.aggregations["comp"].buckets[-1].key
+        response = await run_search(after=after)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    client = async_connections.create_connection(
+        hosts=[os.environ["ELASTICSEARCH_URL"]]
+    )
+
+    # create the index and populate it with some data
+    # note that the dataset is imported from the library's test suite
+    await client.indices.delete(index="git", ignore_unavailable=True)
+    await client.indices.create(index="git", **GIT_INDEX)
+    await async_bulk(client, DATA, raise_on_error=True, refresh=True)
+
+    # run some aggregations on the data
+    async for b in scan_aggs(
+        AsyncSearch(index="git"),
+        [{"files": aggs.Terms(field="files")}],
+        {"first_seen": aggs.Min(field="committed_date")},
+    ):
+        print(
+            "File %s has been modified %d times, first seen at %s."
+            % (b.key.files, b.doc_count, b.first_seen.value_as_string)
+        )
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/esql_employees.py 9.2.0-1/examples/dsl/async/esql_employees.py
--- 8.17.2-2/examples/dsl/async/esql_employees.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/esql_employees.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,170 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# ES|QL query builder example
+
+Requirements:
+
+$ pip install "elasticsearch[async]" faker
+
+To run the example:
+
+$ python esql_employees.py "name to search"
+
+The index will be created automatically with a list of 1000 randomly generated
+employees if it does not exist. Add `--recreate-index` or `-r` to the command
+to regenerate it.
+
+Examples:
+
+$ python esql_employees "Mark"  # employees named Mark (first or last names)
+$ python esql_employees "Sarah" --limit 10  # up to 10 employees named Sarah
+$ python esql_employees "Sam" --sort height  # sort results by height
+$ python esql_employees "Sam" --sort name  # sort results by last name
+"""
+
+import argparse
+import asyncio
+import os
+import random
+
+from faker import Faker
+
+from elasticsearch.dsl import AsyncDocument, InnerDoc, M, async_connections
+from elasticsearch.esql import ESQLBase
+from elasticsearch.esql.functions import concat, multi_match
+
+fake = Faker()
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+    zip_code: M[str]
+
+
+class Employee(AsyncDocument):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+    @property
+    def name(self) -> str:
+        return f"{self.first_name} {self.last_name}"
+
+    def __repr__(self) -> str:
+        return f"<Employee[{self.meta.id}]: {self.first_name} {self.last_name}>"
+
+
+async def create(num_employees: int = 1000) -> None:
+    print("Creating a new employee index...")
+    if await Employee._index.exists():
+        await Employee._index.delete()
+    await Employee.init()
+
+    for i in range(num_employees):
+        address = Address(
+            address=fake.address(), city=fake.city(), zip_code=fake.zipcode()
+        )
+        emp = Employee(
+            emp_no=10000 + i,
+            first_name=fake.first_name(),
+            last_name=fake.last_name(),
+            height=int((random.random() * 0.8 + 1.5) * 1000) / 1000,
+            still_hired=random.random() >= 0.5,
+            address=address,
+        )
+        await emp.save()
+    await Employee._index.refresh()
+
+
+async def search(query: str, limit: int, sort: str) -> None:
+    q: ESQLBase = (
+        Employee.esql_from()
+        .where(multi_match(query, Employee.first_name, Employee.last_name))
+        .eval(full_name=concat(Employee.first_name, " ", Employee.last_name))
+    )
+    if sort == "height":
+        q = q.sort(Employee.height.desc())
+    elif sort == "name":
+        q = q.sort(Employee.last_name.asc())
+    q = q.limit(limit)
+    async for result in Employee.esql_execute(q, return_additional=True):
+        assert type(result) == tuple
+        employee = result[0]
+        full_name = result[1]["full_name"]
+        print(
+            f"{full_name:<20}",
+            f"{'Hired' if employee.still_hired else 'Not hired':<10}",
+            f"{employee.height:5.2f}m",
+            f"{employee.address.city:<20}",
+        )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Employee ES|QL example")
+    parser.add_argument(
+        "--recreate-index",
+        "-r",
+        action="store_true",
+        help="Recreate and populate the index",
+    )
+    parser.add_argument(
+        "--limit",
+        action="store",
+        type=int,
+        default=100,
+        help="Maximum number or employees to return (default: 100)",
+    )
+    parser.add_argument(
+        "--sort",
+        action="store",
+        type=str,
+        default=None,
+        help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)',
+    )
+    parser.add_argument(
+        "query", action="store", help="The name or partial name to search for"
+    )
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await Employee._index.exists():
+        await create()
+    await Employee.init()
+
+    await search(args.query, args.limit, args.sort)
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/parent_child.py 9.2.0-1/examples/dsl/async/parent_child.py
--- 8.17.2-2/examples/dsl/async/parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,276 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Complex data model example modeling stackoverflow-like data.
+
+It is used to showcase several key features of elasticsearch-dsl:
+
+    * Object and Nested fields: see User and Comment classes and fields they
+      are used in
+
+        * method add_comment is used to add comments
+
+    * Parent/Child relationship
+
+        * See the Join field on Post creating the relationship between Question
+          and Answer
+
+        * Meta.matches allows the hits from same index to be wrapped in proper
+          classes
+
+        * to see how child objects are created see Question.add_answer
+
+        * Question.search_answers shows how to query for children of a
+          particular parent
+
+"""
+import asyncio
+import os
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    Date,
+    InnerDoc,
+    Join,
+    Keyword,
+    Long,
+    Text,
+    async_connections,
+    mapped_field,
+)
+
+
+class User(InnerDoc):
+    """
+    Class used to represent a denormalized user stored on other objects.
+    """
+
+    id: int = mapped_field(Long())
+    signed_up: Optional[datetime] = mapped_field(Date())
+    username: str = mapped_field(Text(fields={"keyword": Keyword()}))
+    email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+    location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+
+
+class Comment(InnerDoc):
+    """
+    Class wrapper for nested comment objects.
+    """
+
+    author: User
+    created: datetime
+    content: str
+
+
+class Post(AsyncDocument):
+    """
+    Base class for Question and Answer containing the common fields.
+    """
+
+    author: User
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _routing: str = mapped_field(default=None)
+        _id: Optional[int] = mapped_field(default=None)
+
+    created: Optional[datetime] = mapped_field(default=None)
+    body: str = mapped_field(default="")
+    comments: List[Comment] = mapped_field(default_factory=list)
+    question_answer: Any = mapped_field(
+        Join(relations={"question": "answer"}), default_factory=dict
+    )
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # Post is an abstract class, make sure it never gets used for
+        # deserialization
+        return False
+
+    class Index:
+        name = "test-qa-site"
+        settings = {
+            "number_of_shards": 1,
+            "number_of_replicas": 0,
+        }
+
+    async def add_comment(
+        self,
+        user: User,
+        content: str,
+        created: Optional[datetime] = None,
+        commit: Optional[bool] = True,
+    ) -> Comment:
+        c = Comment(author=user, content=content, created=created or datetime.now())
+        self.comments.append(c)
+        if commit:
+            await self.save()
+        return c
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # if there is no date, use now
+        if self.created is None:
+            self.created = datetime.now()
+        await super().save(**kwargs)
+
+
+class Question(Post):
+    tags: List[str] = mapped_field(
+        default_factory=list
+    )  # .tags will return empty list if not present
+    title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Question class for parent documents"""
+        return bool(hit["_source"]["question_answer"] == "question")
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> AsyncSearch:  # type: ignore[override]
+        return cls._index.search(**kwargs).filter("term", question_answer="question")
+
+    async def add_answer(
+        self,
+        user: User,
+        body: str,
+        created: Optional[datetime] = None,
+        accepted: bool = False,
+        commit: Optional[bool] = True,
+    ) -> "Answer":
+        answer = Answer(
+            # required make sure the answer is stored in the same shard
+            _routing=self.meta.id,
+            # set up the parent/child mapping
+            question_answer={"name": "answer", "parent": self.meta.id},
+            # pass in the field values
+            author=user,
+            created=created,
+            body=body,
+            is_accepted=accepted,
+        )
+        if commit:
+            await answer.save()
+        return answer
+
+    def search_answers(self) -> AsyncSearch:
+        # search only our index
+        s = Answer.search()
+        # filter for answers belonging to us
+        s = s.filter("parent_id", type="answer", id=self.meta.id)
+        # add routing to only go to specific shard
+        s = s.params(routing=self.meta.id)
+        return s
+
+    async def get_answers(self) -> List[Any]:
+        """
+        Get answers either from inner_hits already present or by searching
+        elasticsearch.
+        """
+        if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
+            return cast(List[Any], self.meta.inner_hits["answer"].hits)
+        return [a async for a in self.search_answers()]
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.question_answer = "question"
+        await super().save(**kwargs)
+
+
+class Answer(Post):
+    is_accepted: bool = mapped_field(default=False)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Answer class for child documents with child name 'answer'"""
+        return (
+            isinstance(hit["_source"]["question_answer"], dict)
+            and hit["_source"]["question_answer"].get("name") == "answer"
+        )
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> AsyncSearch:  # type: ignore[override]
+        return cls._index.search(**kwargs).exclude("term", question_answer="question")
+
+    async def get_question(self) -> Optional[Question]:
+        # cache question in self.meta
+        # any attributes set on self would be interpreted as fields
+        if "question" not in self.meta:
+            self.meta.question = await Question.get(
+                id=self.question_answer.parent, index=self.meta.index
+            )
+        return cast(Optional[Question], self.meta.question)
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # set routing to parents id automatically
+        self.meta.routing = self.question_answer.parent
+        await super().save(**kwargs)
+
+
+async def setup() -> None:
+    """Create an IndexTemplate and save it into elasticsearch."""
+    index_template = Post._index.as_composable_template("base", priority=100)
+    await index_template.save()
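+    # the index itself (with the parent/child mapping from the template) is
+    # created automatically when the first document is saved into it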
+
+
+async def main() -> Answer:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create index
+    await setup()
+
+    # user objects to use
+    nick = User(
+        id=47,
+        signed_up=datetime(2017, 4, 3),
+        username="fxdgear",
+        email="nick.lang@elastic.co",
+        location="Colorado",
+    )
+    honza = User(
+        id=42,
+        signed_up=datetime(2013, 4, 3),
+        username="honzakral",
+        email="honza@elastic.co",
+        location="Prague",
+    )
+
+    # create a question object
+    question = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+    )
+    await question.save()
+    answer = await question.add_answer(honza, "Just use `elasticsearch-py`!")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+    return answer
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/percolate.py 9.2.0-1/examples/dsl/async/percolate.py
--- 8.17.2-2/examples/dsl/async/percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,117 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import asyncio
+import os
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    Keyword,
+    Percolator,
+    Q,
+    Query,
+    async_connections,
+    mapped_field,
+)
+
+
+class BlogPost(AsyncDocument):
+    """
+    Blog posts that will be automatically tagged based on percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    content: Optional[str]
+    tags: List[str] = mapped_field(Keyword(), default_factory=list)
+
+    class Index:
+        name = "test-blogpost"
+
+    async def add_tags(self) -> None:
+        # run a percolation to automatically tag the blog post.
+        s = AsyncSearch(index="test-percolator")
+        s = s.query(
+            "percolate", field="query", index=self._get_index(), document=self.to_dict()
+        )
+
+        # collect all the tags from matched percolators
+        async for percolator in s:
+            self.tags.extend(percolator.tags)
+
+        # make sure tags are unique
+        self.tags = list(set(self.tags))
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        await self.add_tags()
+        await super().save(**kwargs)
+
+
+class PercolatorDoc(AsyncDocument):
+    """
+    Document class used for storing the percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        _id: str
+
+    # relevant fields from BlogPost must be also present here for the queries
+    # to be able to use them. Another option would be to use document
+    # inheritance but save() would have to be reset to normal behavior.
+    content: Optional[str]
+
+    # the percolator query to be run against the doc
+    query: Query = mapped_field(Percolator())
+    # list of tags to append to a document
+    tags: List[str] = mapped_field(Keyword(multi=True))
+
+    class Index:
+        name = "test-percolator"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    # create the percolator index if it doesn't exist
+    if not await PercolatorDoc._index.exists():
+        await PercolatorDoc.init()
+
+    # register a percolation query looking for documents about python
+    await PercolatorDoc(
+        _id="python",
+        tags=["programming", "development", "python"],
+        content="",
+        query=Q("match", content="python"),
+    ).save(refresh=True)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    await setup()
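+
+    # a minimal usage sketch (not part of the original flow): saving a blog
+    # post runs add_tags(), which percolates the document against the
+    # registered queries and copies their tags onto the post
+    #
+    #   post = BlogPost(_id=1, content="an article about python")
+    #   await post.save()
+    #   assert "python" in post.tags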
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/search_as_you_type.py 9.2.0-1/examples/dsl/async/search_as_you_type.py
--- 8.17.2-2/examples/dsl/async/search_as_you_type.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/search_as_you_type.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,99 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with search_as_you_type field datatype and how to search it.
+
+When a field is created with the search_as_you_type datatype, Elasticsearch creates
+additional subfields to enable efficient as-you-type completion, matching terms at any
+position within the input.
+
+A custom analyzer with ascii folding can be added to allow the search to work in
+different languages.
+"""
+
+import asyncio
+import os
+from typing import TYPE_CHECKING, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    SearchAsYouType,
+    async_connections,
+    mapped_field,
+)
+from elasticsearch.dsl.query import MultiMatch
+
+
+class Person(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="")
+
+    class Index:
+        name = "test-search-as-you-type"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await Person.init()
+
+    import pprint
+
+    pprint.pprint(Person().to_dict(), indent=2)
+
+    # index some sample data
+    names = [
+        "Andy Warhol",
+        "Alphonse Mucha",
+        "Henri de Toulouse-Lautrec",
+        "Jára Cimrman",
+    ]
+    for id, name in enumerate(names):
+        await Person(_id=id, name=name).save()
+
+    # refresh index manually to make changes live
+    await Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"):
+        s = Person.search()
+
+        s.query = MultiMatch(  # type: ignore[assignment]
+            query=text,
+            type="bool_prefix",
+            fields=["name", "name._2gram", "name._3gram"],
+        )
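+        # "bool_prefix" matches each term of the input, treating the last one
+        # as a prefix; "name._2gram" and "name._3gram" are the shingle
+        # subfields generated automatically because of max_shingle_size=3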
+
+        response = await s.execute()
+
+        # print out all the options we got
+        for h in response:
+            print("%15s: %25s" % (text, h.name))
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/semantic_text.py 9.2.0-1/examples/dsl/async/semantic_text.py
--- 8.17.2-2/examples/dsl/async/semantic_text.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/semantic_text.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,148 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+"""
+# Semantic Text example
+
+Requirements:
+
+$ pip install "elasticsearch[async]" tqdm
+
+Before running this example, an ELSER inference endpoint must be created in the
+Elasticsearch cluster. This can be done manually from Kibana, or with the
+following curl command from a terminal:
+
+curl -X PUT \
+  "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \
+  -H "Content-Type: application/json" \
+  -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}'
+
+To run the example:
+
+$ python semantic_text.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to the command to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python semantic_text.py "work from home"
+$ python semantic_text.py "vacation time"
+$ python semantic_text.py "can I bring a bird to work?"
+
+When the index is created, the inference service will split the documents into
+short passages, and for each passage a sparse embedding will be generated using
+Elastic's ELSER v2 model.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, Optional
+from urllib.request import urlopen
+
+from tqdm import tqdm
+
+from elasticsearch import dsl
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+
+class WorkplaceDoc(dsl.AsyncDocument):
+    class Index:
+        name = "workplace_documents_semantic"
+
+    name: str
+    summary: str
+    content: Any = dsl.mapped_field(
+        dsl.field.SemanticText(inference_id="my-elser-endpoint")
+    )
+    created: datetime
+    updated: Optional[datetime]
+    url: str = dsl.mapped_field(dsl.Keyword())
+    category: str = dsl.mapped_field(dsl.Keyword())
+
+
+async def create() -> None:
+
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+    # refresh the index
+    await WorkplaceDoc._index.refresh()
+
+
+async def search(query: str) -> dsl.AsyncSearch[WorkplaceDoc]:
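+    # the "semantic" query runs inference on the query text using the endpoint
+    # attached to the semantic_text field and matches the stored embeddings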
+    search = WorkplaceDoc.search()
+    search = search[:5]
+    return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query))
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    dsl.async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Content: {hit.content.text}")
+        print("--------------------\n")
+
+    # close the connection
+    await dsl.async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/sparse_vectors.py 9.2.0-1/examples/dsl/async/sparse_vectors.py
--- 8.17.2-2/examples/dsl/async/sparse_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/sparse_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,198 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Sparse vector database example
+
+Requirements:
+
+$ pip install nltk tqdm "elasticsearch[async]"
+
+Before running this example, the ELSER v2 model must be downloaded and deployed
+to the Elasticsearch cluster, and an ingest pipeline must be defined. This can
+be done manually from Kibana, or with the following three curl commands from a
+terminal, adjusting the endpoint as needed:
+
+curl -X PUT \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \
+  -H "Content-Type: application/json" \
+  -d '{"input":{"field_names":["text_field"]}}'
+curl -X POST \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated"
+curl -X PUT \
+  "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \
+  -H "Content-Type: application/json" \
+  -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}'
+
+To run the example:
+
+$ python sparse_vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python sparse_vectors.py "work from home"
+$ python sparse_vectors.py "vacation time"
+$ python sparse_vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage a sparse embedding is generated using Elastic's ELSER v2 model.
+The documents that are returned as search results are those that have the
+highest scored passages. Add `--show-inner-hits` to the command to see
+individual passage results as well.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+from urllib.request import urlopen
+
+import nltk
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    InnerDoc,
+    Keyword,
+    Q,
+    SparseVector,
+    async_connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+
+class Passage(InnerDoc):
+    content: Optional[str]
+    embedding: Dict[str, float] = mapped_field(SparseVector(), init=False)
+
+
+class WorkplaceDoc(AsyncDocument):
+    class Index:
+        name = "workplace_documents_sparse"
+        settings = {"default_pipeline": "elser_ingest_pipeline"}
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword())
+    category: str = mapped_field(Keyword())
+    passages: List[Passage] = mapped_field(default=[])
+
+    _model: Any = None
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = nltk.sent_tokenize(self.content)
+
+        # store each passage as a nested document; the sparse embedding itself is
+        # added at index time by the "elser_ingest_pipeline" default pipeline
+        for passage in passages:
+            self.passages.append(Passage(content=passage))
+
+
+async def create() -> None:
+
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+
+async def search(query: str) -> AsyncSearch[WorkplaceDoc]:
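+    # nested query over the passages; "inner_hits" exposes the two best-matching
+    # passages of each document (printed when --show-inner-hits is given)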
+    return WorkplaceDoc.search()[:5].query(
+        "nested",
+        path="passages",
+        query=Q(
+            "text_expansion",
+            passages__content={
+                "model_id": ".elser_model_2",
+                "model_text": query,
+            },
+        ),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/async/vectors.py 9.2.0-1/examples/dsl/async/vectors.py
--- 8.17.2-2/examples/dsl/async/vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/async/vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,187 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Vector database example
+
+Requirements:
+
+$ pip install nltk sentence_transformers tqdm "elasticsearch[async]"
+
+To run the example:
+
+$ python vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python vectors.py "work from home"
+$ python vectors.py "vacation time"
+$ python vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage an embedding is generated using the open source
+"all-MiniLM-L6-v2" model. The documents that are returned as search results are
+those that have the highest scored passages. Add `--show-inner-hits` to the
+command to see individual passage results as well.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, List, Optional, cast
+from urllib.request import urlopen
+
+import nltk
+from sentence_transformers import SentenceTransformer
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    DenseVector,
+    InnerDoc,
+    Keyword,
+    M,
+    async_connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+MODEL_NAME = "all-MiniLM-L6-v2"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+# this will be the embedding model
+embedding_model: Any = None
+
+
+class Passage(InnerDoc):
+    content: str
+    embedding: List[float] = mapped_field(DenseVector())
+
+
+class WorkplaceDoc(AsyncDocument):
+    class Index:
+        name = "workplace_documents"
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword(required=True))
+    category: str = mapped_field(Keyword(required=True))
+    passages: M[List[Passage]] = mapped_field(default=[])
+
+    @classmethod
+    def get_embedding(cls, input: str) -> List[float]:
+        global embedding_model
+        if embedding_model is None:
+            embedding_model = SentenceTransformer(MODEL_NAME)
+        return cast(List[float], list(embedding_model.encode(input)))
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = cast(List[str], nltk.sent_tokenize(self.content))
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(
+                Passage(content=passage, embedding=self.get_embedding(passage))
+            )
+
+
+async def create() -> None:
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+
+async def search(query: str) -> AsyncSearch[WorkplaceDoc]:
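+    # approximate kNN search over the passage embeddings; the query text is
+    # embedded with the same "all-MiniLM-L6-v2" model used at indexing time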
+    return WorkplaceDoc.search().knn(
+        field=WorkplaceDoc.passages.embedding,
+        k=5,
+        num_candidates=50,
+        query_vector=list(WorkplaceDoc.get_embedding(query)),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/examples/dsl/completion.py 9.2.0-1/examples/dsl/completion.py
--- 8.17.2-2/examples/dsl/completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,113 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with completion suggester.
+
+In the ``Person`` class we index the person's name to allow auto completing in
+any order ("first last", "middle last first", ...). For the weight we use a
+value from the ``popularity`` field which is a long.
+
+To make the suggestions work in different languages we added a custom analyzer
+that does ascii folding.
+"""
+
+import os
+from itertools import permutations
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from elasticsearch.dsl import (
+    Completion,
+    Document,
+    Keyword,
+    Long,
+    Text,
+    analyzer,
+    connections,
+    mapped_field,
+    token_filter,
+)
+
+# custom analyzer for names
+ascii_fold = analyzer(
+    "ascii_fold",
+    # we don't want to split O'Brian or Toulouse-Lautrec
+    tokenizer="whitespace",
+    filter=["lowercase", token_filter("ascii_fold", "asciifolding")],
+)
+
+
+class Person(Document):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+    popularity: int = mapped_field(Long(), default=0)
+
+    # completion field with a custom analyzer
+    suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False)
+
+    def clean(self) -> None:
+        """
+        Automatically construct the suggestion input and weight by taking all
+        possible permutations of Person's name as ``input`` and taking their
+        popularity as ``weight``.
+        """
+        self.suggest = {
+            "input": [" ".join(p) for p in permutations(self.name.split())],
+            "weight": self.popularity,
+        }
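+        # e.g. "Jára Cimrman" yields the inputs ["Jára Cimrman", "Cimrman Jára"],
+        # both carrying the person's popularity as weight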
+
+    class Index:
+        name = "test-suggest"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    Person.init()
+
+    # index some sample data
+    for id, (name, popularity) in enumerate(
+        [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)]
+    ):
+        Person(_id=id, name=name, popularity=popularity).save()
+
+    # refresh index manually to make changes live
+    Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Jara Cimr", "tou", "de hen"):
+        s = Person.search()
+        s = s.suggest("auto_complete", text, completion={"field": "suggest"})
+        response = s.execute()
+
+        # print out all the options we got
+        for option in response.suggest["auto_complete"][0].options:
+            print("%10s: %25s (%d)" % (text, option._source.name, option._score))
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/composite_agg.py 9.2.0-1/examples/dsl/composite_agg.py
--- 8.17.2-2/examples/dsl/composite_agg.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/composite_agg.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,90 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import os
+from typing import Any, Dict, Iterator, Mapping, Sequence, cast
+
+from elasticsearch.dsl import Agg, Response, Search, aggs, connections
+from elasticsearch.dsl.types import CompositeAggregate
+from elasticsearch.helpers import bulk
+from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX
+
+
+def scan_aggs(
+    search: Search,
+    source_aggs: Sequence[Mapping[str, Agg]],
+    inner_aggs: Dict[str, Agg] = {},
+    size: int = 10,
+) -> Iterator[CompositeAggregate]:
+    """
+    Helper function used to iterate over all possible bucket combinations of
+    ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
+    ``composite`` aggregation under the hood to perform this.
+    """
+
+    def run_search(**kwargs: Any) -> Response:
+        s = search[:0]
+        bucket = s.aggs.bucket(
+            "comp",
+            aggs.Composite(
+                sources=source_aggs,
+                size=size,
+                **kwargs,
+            ),
+        )
+        for agg_name, agg in inner_aggs.items():
+            bucket[agg_name] = agg
+        return s.execute()
+
+    response = run_search()
+    while response.aggregations["comp"].buckets:
+        for b in response.aggregations["comp"].buckets:
+            yield cast(CompositeAggregate, b)
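+        # resume the next page from where this one ended; if the response does
+        # not include "after_key", fall back to the key of the last bucket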
+        if "after_key" in response.aggregations["comp"]:
+            after = response.aggregations["comp"].after_key
+        else:
+            after = response.aggregations["comp"].buckets[-1].key
+        response = run_search(after=after)
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    client = connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the index and populate it with some data
+    # note that the dataset is imported from the library's test suite
+    client.indices.delete(index="git", ignore_unavailable=True)
+    client.indices.create(index="git", **GIT_INDEX)
+    bulk(client, DATA, raise_on_error=True, refresh=True)
+
+    # run some aggregations on the data
+    for b in scan_aggs(
+        Search(index="git"),
+        [{"files": aggs.Terms(field="files")}],
+        {"first_seen": aggs.Min(field="committed_date")},
+    ):
+        print(
+            "File %s has been modified %d times, first seen at %s."
+            % (b.key.files, b.doc_count, b.first_seen.value_as_string)
+        )
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/esql_employees.py 9.2.0-1/examples/dsl/esql_employees.py
--- 8.17.2-2/examples/dsl/esql_employees.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/esql_employees.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,169 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# ES|QL query builder example
+
+Requirements:
+
+$ pip install elasticsearch faker
+
+To run the example:
+
+$ python esql_employees.py "name to search"
+
+The index will be created automatically with a list of 1000 randomly generated
+employees if it does not exist. Add `--recreate-index` or `-r` to the command
+to regenerate it.
+
+Examples:
+
+$ python esql_employees.py "Mark"  # employees named Mark (first or last name)
+$ python esql_employees.py "Sarah" --limit 10  # up to 10 employees named Sarah
+$ python esql_employees.py "Sam" --sort height  # sort results by height
+$ python esql_employees.py "Sam" --sort name  # sort results by last name
+"""
+
+import argparse
+import os
+import random
+
+from faker import Faker
+
+from elasticsearch.dsl import Document, InnerDoc, M, connections
+from elasticsearch.esql import ESQLBase
+from elasticsearch.esql.functions import concat, multi_match
+
+fake = Faker()
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+    zip_code: M[str]
+
+
+class Employee(Document):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+    @property
+    def name(self) -> str:
+        return f"{self.first_name} {self.last_name}"
+
+    def __repr__(self) -> str:
+        return f"<Employee[{self.meta.id}]: {self.first_name} {self.last_name}>"
+
+
+def create(num_employees: int = 1000) -> None:
+    print("Creating a new employee index...")
+    if Employee._index.exists():
+        Employee._index.delete()
+    Employee.init()
+
+    for i in range(num_employees):
+        address = Address(
+            address=fake.address(), city=fake.city(), zip_code=fake.zipcode()
+        )
+        emp = Employee(
+            emp_no=10000 + i,
+            first_name=fake.first_name(),
+            last_name=fake.last_name(),
+            height=int((random.random() * 0.8 + 1.5) * 1000) / 1000,
+            still_hired=random.random() >= 0.5,
+            address=address,
+        )
+        emp.save()
+    Employee._index.refresh()
+
+
+def search(query: str, limit: int, sort: str) -> None:
+    q: ESQLBase = (
+        Employee.esql_from()
+        .where(multi_match(query, Employee.first_name, Employee.last_name))
+        .eval(full_name=concat(Employee.first_name, " ", Employee.last_name))
+    )
+    if sort == "height":
+        q = q.sort(Employee.height.desc())
+    elif sort == "name":
+        q = q.sort(Employee.last_name.asc())
+    q = q.limit(limit)
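+    # the builder calls above produce an ES|QL pipeline roughly equivalent to:
+    #   FROM employees
+    #   | WHERE MULTI_MATCH("<query>", first_name, last_name)
+    #   | EVAL full_name = CONCAT(first_name, " ", last_name)
+    #   | SORT height DESC   (or | SORT last_name ASC, if requested)
+    #   | LIMIT <limit>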
+    for result in Employee.esql_execute(q, return_additional=True):
+        assert type(result) == tuple
+        employee = result[0]
+        full_name = result[1]["full_name"]
+        print(
+            f"{full_name:<20}",
+            f"{'Hired' if employee.still_hired else 'Not hired':<10}",
+            f"{employee.height:5.2f}m",
+            f"{employee.address.city:<20}",
+        )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Employee ES|QL example")
+    parser.add_argument(
+        "--recreate-index",
+        "-r",
+        action="store_true",
+        help="Recreate and populate the index",
+    )
+    parser.add_argument(
+        "--limit",
+        action="store",
+        type=int,
+        default=100,
+        help="Maximum number or employees to return (default: 100)",
+    )
+    parser.add_argument(
+        "--sort",
+        action="store",
+        type=str,
+        default=None,
+        help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)',
+    )
+    parser.add_argument(
+        "query", action="store", help="The name or partial name to search for"
+    )
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not Employee._index.exists():
+        create()
+    Employee.init()
+
+    search(args.query, args.limit, args.sort)
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/parent_child.py 9.2.0-1/examples/dsl/parent_child.py
--- 8.17.2-2/examples/dsl/parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,275 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Complex data model example modeling stackoverflow-like data.
+
+It is used to showcase several key features of elasticsearch-dsl:
+
+    * Object and Nested fields: see User and Comment classes and fields they
+      are used in
+
+        * method add_comment is used to add comments
+
+    * Parent/Child relationship
+
+        * See the Join field on Post creating the relationship between Question
+          and Answer
+
+        * The _matches() class methods allow hits from the same index to be
+          wrapped in the proper classes
+
+        * to see how child objects are created see Question.add_answer
+
+        * Question.search_answers shows how to query for children of a
+          particular parent
+
+"""
+import os
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
+
+from elasticsearch.dsl import (
+    Date,
+    Document,
+    InnerDoc,
+    Join,
+    Keyword,
+    Long,
+    Search,
+    Text,
+    connections,
+    mapped_field,
+)
+
+
+class User(InnerDoc):
+    """
+    Class used to represent a denormalized user stored on other objects.
+    """
+
+    id: int = mapped_field(Long())
+    signed_up: Optional[datetime] = mapped_field(Date())
+    username: str = mapped_field(Text(fields={"keyword": Keyword()}))
+    email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+    location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+
+
+class Comment(InnerDoc):
+    """
+    Class wrapper for nested comment objects.
+    """
+
+    author: User
+    created: datetime
+    content: str
+
+
+class Post(Document):
+    """
+    Base class for Question and Answer containing the common fields.
+    """
+
+    author: User
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _routing: str = mapped_field(default=None)
+        _id: Optional[int] = mapped_field(default=None)
+
+    created: Optional[datetime] = mapped_field(default=None)
+    body: str = mapped_field(default="")
+    comments: List[Comment] = mapped_field(default_factory=list)
+    question_answer: Any = mapped_field(
+        Join(relations={"question": "answer"}), default_factory=dict
+    )
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # Post is an abstract class, make sure it never gets used for
+        # deserialization
+        return False
+
+    class Index:
+        name = "test-qa-site"
+        settings = {
+            "number_of_shards": 1,
+            "number_of_replicas": 0,
+        }
+
+    def add_comment(
+        self,
+        user: User,
+        content: str,
+        created: Optional[datetime] = None,
+        commit: Optional[bool] = True,
+    ) -> Comment:
+        c = Comment(author=user, content=content, created=created or datetime.now())
+        self.comments.append(c)
+        if commit:
+            self.save()
+        return c
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # if there is no date, use now
+        if self.created is None:
+            self.created = datetime.now()
+        super().save(**kwargs)
+
+
+class Question(Post):
+    tags: List[str] = mapped_field(
+        default_factory=list
+    )  # .tags will return an empty list if not present
+    title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Question class for parent documents"""
+        return bool(hit["_source"]["question_answer"] == "question")
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> Search:  # type: ignore[override]
+        return cls._index.search(**kwargs).filter("term", question_answer="question")
+
+    def add_answer(
+        self,
+        user: User,
+        body: str,
+        created: Optional[datetime] = None,
+        accepted: bool = False,
+        commit: Optional[bool] = True,
+    ) -> "Answer":
+        answer = Answer(
+            # required to make sure the answer is stored in the same shard
+            _routing=self.meta.id,
+            # set up the parent/child mapping
+            question_answer={"name": "answer", "parent": self.meta.id},
+            # pass in the field values
+            author=user,
+            created=created,
+            body=body,
+            is_accepted=accepted,
+        )
+        if commit:
+            answer.save()
+        return answer
+
+    def search_answers(self) -> Search:
+        # search only our index
+        s = Answer.search()
+        # filter for answers belonging to us
+        s = s.filter("parent_id", type="answer", id=self.meta.id)
+        # add routing to only go to specific shard
+        s = s.params(routing=self.meta.id)
+        return s
+
+    def get_answers(self) -> List[Any]:
+        """
+        Get answers either from inner_hits already present or by searching
+        elasticsearch.
+        """
+        if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
+            return cast(List[Any], self.meta.inner_hits["answer"].hits)
+        return [a for a in self.search_answers()]
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.question_answer = "question"
+        super().save(**kwargs)
+
+
+class Answer(Post):
+    is_accepted: bool = mapped_field(default=False)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Answer class for child documents with child name 'answer'"""
+        return (
+            isinstance(hit["_source"]["question_answer"], dict)
+            and hit["_source"]["question_answer"].get("name") == "answer"
+        )
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> Search:  # type: ignore[override]
+        return cls._index.search(**kwargs).exclude("term", question_answer="question")
+
+    def get_question(self) -> Optional[Question]:
+        # cache question in self.meta
+        # any attributes set on self would be interpreted as fields
+        if "question" not in self.meta:
+            self.meta.question = Question.get(
+                id=self.question_answer.parent, index=self.meta.index
+            )
+        return cast(Optional[Question], self.meta.question)
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # set routing to parents id automatically
+        self.meta.routing = self.question_answer.parent
+        super().save(**kwargs)
+
+
+def setup() -> None:
+    """Create an IndexTemplate and save it into elasticsearch."""
+    index_template = Post._index.as_composable_template("base", priority=100)
+    index_template.save()
+
+
+def main() -> Answer:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create index
+    setup()
+
+    # user objects to use
+    nick = User(
+        id=47,
+        signed_up=datetime(2017, 4, 3),
+        username="fxdgear",
+        email="nick.lang@elastic.co",
+        location="Colorado",
+    )
+    honza = User(
+        id=42,
+        signed_up=datetime(2013, 4, 3),
+        username="honzakral",
+        email="honza@elastic.co",
+        location="Prague",
+    )
+
+    # create a question object
+    question = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+    )
+    question.save()
+    answer = question.add_answer(honza, "Just use `elasticsearch-py`!")
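+
+    # the answers of a question can later be fetched with question.get_answers(),
+    # or queried directly through question.search_answers()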
+
+    # close the connection
+    connections.get_connection().close()
+
+    return answer
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/percolate.py 9.2.0-1/examples/dsl/percolate.py
--- 8.17.2-2/examples/dsl/percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,116 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import os
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from elasticsearch.dsl import (
+    Document,
+    Keyword,
+    Percolator,
+    Q,
+    Query,
+    Search,
+    connections,
+    mapped_field,
+)
+
+
+class BlogPost(Document):
+    """
+    Blog posts that will be automatically tagged based on percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    content: Optional[str]
+    tags: List[str] = mapped_field(Keyword(), default_factory=list)
+
+    class Index:
+        name = "test-blogpost"
+
+    def add_tags(self) -> None:
+        # run a percolation to automatically tag the blog post.
+        s = Search(index="test-percolator")
+        s = s.query(
+            "percolate", field="query", index=self._get_index(), document=self.to_dict()
+        )
+
+        # collect all the tags from matched percolators
+        for percolator in s:
+            self.tags.extend(percolator.tags)
+
+        # make sure tags are unique
+        self.tags = list(set(self.tags))
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.add_tags()
+        super().save(**kwargs)
+
+
+class PercolatorDoc(Document):
+    """
+    Document class used for storing the percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        _id: str
+
+    # relevant fields from BlogPost must be also present here for the queries
+    # to be able to use them. Another option would be to use document
+    # inheritance but save() would have to be reset to normal behavior.
+    content: Optional[str]
+
+    # the percolator query to be run against the doc
+    query: Query = mapped_field(Percolator())
+    # list of tags to append to a document
+    tags: List[str] = mapped_field(Keyword(multi=True))
+
+    class Index:
+        name = "test-percolator"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def setup() -> None:
+    # create the percolator index if it doesn't exist
+    if not PercolatorDoc._index.exists():
+        PercolatorDoc.init()
+
+    # register a percolation query looking for documents about python
+    PercolatorDoc(
+        _id="python",
+        tags=["programming", "development", "python"],
+        content="",
+        query=Q("match", content="python"),
+    ).save(refresh=True)
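+    # refresh=True makes the percolation query searchable immediately, so blog
+    # posts saved right after setup() can already match it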
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    setup()
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/search_as_you_type.py 9.2.0-1/examples/dsl/search_as_you_type.py
--- 8.17.2-2/examples/dsl/search_as_you_type.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/search_as_you_type.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,98 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with search_as_you_type field datatype and how to search it.
+
+When a field is created with the search_as_you_type datatype, Elasticsearch creates
+additional subfields to enable efficient as-you-type completion, matching terms at any
+position within the input.
+
+A custom analyzer with ascii folding can be added to allow the search to work in
+different languages.
+"""
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from elasticsearch.dsl import (
+    Document,
+    SearchAsYouType,
+    connections,
+    mapped_field,
+)
+from elasticsearch.dsl.query import MultiMatch
+
+
+class Person(Document):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="")
+
+    class Index:
+        name = "test-search-as-you-type"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    Person.init()
+
+    import pprint
+
+    pprint.pprint(Person().to_dict(), indent=2)
+
+    # index some sample data
+    names = [
+        "Andy Warhol",
+        "Alphonse Mucha",
+        "Henri de Toulouse-Lautrec",
+        "Jára Cimrman",
+    ]
+    for id, name in enumerate(names):
+        Person(_id=id, name=name).save()
+
+    # refresh index manually to make changes live
+    Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"):
+        s = Person.search()
+
+        s.query = MultiMatch(  # type: ignore[assignment]
+            query=text,
+            type="bool_prefix",
+            fields=["name", "name._2gram", "name._3gram"],
+        )
+
+        response = s.execute()
+
+        # print out all the options we got
+        for h in response:
+            print("%15s: %25s" % (text, h.name))
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/semantic_text.py 9.2.0-1/examples/dsl/semantic_text.py
--- 8.17.2-2/examples/dsl/semantic_text.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/semantic_text.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,147 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+"""
+# Semantic Text example
+
+Requirements:
+
+$ pip install elasticsearch tqdm
+
+Before running this example, an ELSER inference endpoint must be created in the
+Elasticsearch cluster. This can be done manually from Kibana, or with the
+following curl command from a terminal:
+
+curl -X PUT \
+  "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \
+  -H "Content-Type: application/json" \
+  -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}'
+
+To run the example:
+
+$ python semantic_text.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to the command to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python semantic_text.py "work from home"
+$ python semantic_text.py "vacation time"
+$ python semantic_text.py "can I bring a bird to work?"
+
+When the index is created, the inference service will split the documents into
+short passages, and for each passage a sparse embedding will be generated using
+Elastic's ELSER v2 model.
+"""
+
+import argparse
+import json
+import os
+from datetime import datetime
+from typing import Any, Optional
+from urllib.request import urlopen
+
+from tqdm import tqdm
+
+from elasticsearch import dsl
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+
+class WorkplaceDoc(dsl.Document):
+    class Index:
+        name = "workplace_documents_semantic"
+
+    name: str
+    summary: str
+    content: Any = dsl.mapped_field(
+        dsl.field.SemanticText(inference_id="my-elser-endpoint")
+    )
+    created: datetime
+    updated: Optional[datetime]
+    url: str = dsl.mapped_field(dsl.Keyword())
+    category: str = dsl.mapped_field(dsl.Keyword())
+
+
+def create() -> None:
+
+    # create the index
+    WorkplaceDoc._index.delete(ignore_unavailable=True)
+    WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        doc.save()
+
+    # refresh the index
+    WorkplaceDoc._index.refresh()
+
+
+def search(query: str) -> dsl.Search[WorkplaceDoc]:
+    search = WorkplaceDoc.search()
+    search = search[:5]
+    return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query))
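+
+# Illustrative note (not part of the original example): the request body built by
+# ``search()`` serializes to roughly
+#
+#   {"query": {"semantic": {"field": "content", "query": "..."}}, "size": 5}
+#
+# and can be inspected with ``search("...").to_dict()``.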
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    dsl.connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not WorkplaceDoc._index.exists():
+        create()
+
+    results = search(args.query)
+
+    for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Content: {hit.content.text}")
+        print("--------------------\n")
+
+    # close the connection
+    dsl.connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/sparse_vectors.py 9.2.0-1/examples/dsl/sparse_vectors.py
--- 8.17.2-2/examples/dsl/sparse_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/sparse_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,197 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Sparse vector database example
+
+Requirements:
+
+$ pip install nltk tqdm elasticsearch
+
+Before running this example, the ELSER v2 model must be downloaded and deployed
+to the Elasticsearch cluster, and an ingest pipeline must be defined. This can
+be done manually from Kibana, or with the following three curl commands from a
+terminal, adjusting the endpoint as needed:
+
+curl -X PUT \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \
+  -H "Content-Type: application/json" \
+  -d '{"input":{"field_names":["text_field"]}}'
+curl -X POST \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated"
+curl -X PUT \
+  "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \
+  -H "Content-Type: application/json" \
+  -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}'
+
+To run the example:
+
+$ python sparse_vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python sparse_vectors.py "work from home"
+$ python sparse_vectors.py "vacation time"
+$ python sparse_vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage a sparse embedding is generated using Elastic's ELSER v2 model.
+The documents that are returned as search results are those that have the
+highest scored passages. Add `--show-inner-hits` to the command to see
+individual passage results as well.
+"""
+
+import argparse
+import json
+import os
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+from urllib.request import urlopen
+
+import nltk
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    Document,
+    InnerDoc,
+    Keyword,
+    Q,
+    Search,
+    SparseVector,
+    connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+
+class Passage(InnerDoc):
+    content: Optional[str]
+    embedding: Dict[str, float] = mapped_field(SparseVector(), init=False)
+
+
+class WorkplaceDoc(Document):
+    class Index:
+        name = "workplace_documents_sparse"
+        settings = {"default_pipeline": "elser_ingest_pipeline"}
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword())
+    category: str = mapped_field(Keyword())
+    passages: List[Passage] = mapped_field(default=[])
+
+    _model: Any = None
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = nltk.sent_tokenize(self.content)
+
+        # save each passage as a nested document; its embedding is generated by
+        # the "elser_ingest_pipeline" ingest pipeline when the document is indexed
+        for passage in passages:
+            self.passages.append(Passage(content=passage))
+
+
+def create() -> None:
+
+    # create the index
+    WorkplaceDoc._index.delete(ignore_unavailable=True)
+    WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        doc.save()
+
+
+def search(query: str) -> Search[WorkplaceDoc]:
+    return WorkplaceDoc.search()[:5].query(
+        "nested",
+        path="passages",
+        query=Q(
+            "text_expansion",
+            passages__content={
+                "model_id": ".elser_model_2",
+                "model_text": query,
+            },
+        ),
+        inner_hits={"size": 2},
+    )
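+
+# Illustrative note (not part of the original example): the request body built by
+# ``search()`` serializes to roughly
+#
+#   {"query": {"nested": {"path": "passages",
+#                         "query": {"text_expansion": {"passages.content": {
+#                             "model_id": ".elser_model_2", "model_text": "..."}}},
+#                         "inner_hits": {"size": 2}}},
+#    "size": 5}
+#
+# The ``inner_hits`` section is what lets ``--show-inner-hits`` print the
+# best-matching passages of each returned document.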
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not WorkplaceDoc._index.exists():
+        create()
+
+    results = search(args.query)
+
+    for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/dsl/vectors.py 9.2.0-1/examples/dsl/vectors.py
--- 8.17.2-2/examples/dsl/vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/dsl/vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,186 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Vector database example
+
+Requirements:
+
+$ pip install nltk sentence_transformers tqdm elasticsearch
+
+To run the example:
+
+$ python vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python vectors.py "work from home"
+$ python vectors.py "vacation time"
+$ python vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage an embedding is generated using the open source
+"all-MiniLM-L6-v2" model. The documents that are returned as search results are
+those that have the highest scored passages. Add `--show-inner-hits` to the
+command to see individual passage results as well.
+"""
+
+import argparse
+import json
+import os
+from datetime import datetime
+from typing import Any, List, Optional, cast
+from urllib.request import urlopen
+
+import nltk
+from sentence_transformers import SentenceTransformer
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    DenseVector,
+    Document,
+    InnerDoc,
+    Keyword,
+    M,
+    Search,
+    connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+MODEL_NAME = "all-MiniLM-L6-v2"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+# this will be the embedding model
+embedding_model: Any = None
+
+
+class Passage(InnerDoc):
+    content: str
+    embedding: List[float] = mapped_field(DenseVector())
+
+
+class WorkplaceDoc(Document):
+    class Index:
+        name = "workplace_documents"
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword(required=True))
+    category: str = mapped_field(Keyword(required=True))
+    passages: M[List[Passage]] = mapped_field(default=[])
+
+    @classmethod
+    def get_embedding(cls, input: str) -> List[float]:
+        global embedding_model
+        if embedding_model is None:
+            embedding_model = SentenceTransformer(MODEL_NAME)
+        return cast(List[float], list(embedding_model.encode(input)))
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = cast(List[str], nltk.sent_tokenize(self.content))
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(
+                Passage(content=passage, embedding=self.get_embedding(passage))
+            )
+
+
+def create() -> None:
+    # create the index
+    WorkplaceDoc._index.delete(ignore_unavailable=True)
+    WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        doc.save()
+
+
+def search(query: str) -> Search[WorkplaceDoc]:
+    return WorkplaceDoc.search().knn(
+        field=WorkplaceDoc.passages.embedding,
+        k=5,
+        num_candidates=50,
+        query_vector=list(WorkplaceDoc.get_embedding(query)),
+        inner_hits={"size": 2},
+    )
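+
+# Illustrative note (not part of the original example): ``knn()`` adds a top-level
+# kNN section to the request body, roughly
+#
+#   {"knn": {"field": "passages.embedding", "k": 5, "num_candidates": 50,
+#            "query_vector": [...], "inner_hits": {"size": 2}}}
+#
+# where ``k`` is the number of nearest neighbours returned and ``num_candidates``
+# the size of the per-shard candidate pool considered during the search.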
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not WorkplaceDoc._index.exists():
+        create()
+
+    results = search(args.query)
+
+    for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/examples/fastapi-apm/dockerfiles/Dockerfile.app 9.2.0-1/examples/fastapi-apm/dockerfiles/Dockerfile.app
--- 8.17.2-2/examples/fastapi-apm/dockerfiles/Dockerfile.app	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/examples/fastapi-apm/dockerfiles/Dockerfile.app	2025-10-28 16:50:53.000000000 +0000
@@ -1,4 +1,4 @@
-FROM python:3.8
+FROM python:3.10
 
 EXPOSE 9292
 WORKDIR /
diff -pruN 8.17.2-2/examples/fastapi-apm/dockerfiles/Dockerfile.ping 9.2.0-1/examples/fastapi-apm/dockerfiles/Dockerfile.ping
--- 8.17.2-2/examples/fastapi-apm/dockerfiles/Dockerfile.ping	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/examples/fastapi-apm/dockerfiles/Dockerfile.ping	2025-10-28 16:50:53.000000000 +0000
@@ -1,4 +1,4 @@
-FROM python:3.8
+FROM python:3.10
 WORKDIR /
 RUN python -m pip install \
     --no-cache \
diff -pruN 8.17.2-2/examples/quotes/.gitignore 9.2.0-1/examples/quotes/.gitignore
--- 8.17.2-2/examples/quotes/.gitignore	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/.gitignore	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff -pruN 8.17.2-2/examples/quotes/README.md 9.2.0-1/examples/quotes/README.md
--- 8.17.2-2/examples/quotes/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/README.md	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,107 @@
+# Quotes
+Quotes database example, which demonstrates the Elasticsearch integration with
+Pydantic models. This example features a React front end and a FastAPI back end.
+
+![Quotes app screenshot](screenshot.png)
+
+## What is this?
+
+This directory contains a small application that demonstrates how easy it is
+to set up a full-text and vector database using [Elasticsearch](https://www.elastic.co/elasticsearch),
+while defining the data model with [Pydantic](https://docs.pydantic.dev/latest/).
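+
+As a rough sketch (illustrative only; the actual model lives in the `backend/`
+directory and may differ), a record with the columns found in `quotes.csv` could
+be described with a Pydantic model like this:
+
+```python
+from pydantic import BaseModel
+
+
+class Quote(BaseModel):
+    quote: str
+    author: str
+    tags: list[str]
+```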
+
+The application includes a FastAPI back end and a React front end. It ingests a
+dataset of famous quotes into an Elasticsearch index, and for each quote it
+generates an embedding using the
+[all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)
+Sentence Transformers model.
+
+The dataset has about 850 famous quotes, each with its author and tags. The
+data is a subset of a
+[Kaggle dataset](https://www.kaggle.com/datasets/akmittal/quotes-dataset) that
+appears to have been generated from quotes that were scraped from the Goodreads
+[popular quotes](https://www.goodreads.com/quotes) page.
+
+## Requirements
+
+Please make sure you have the following installed:
+
+- Python 3.10 or newer
+- Node.js 18 or newer
+
+## How To Run
+
+Follow these steps to install this demo on your computer:
+
+### Clone this repository
+
+Run the following command to install a copy of this project on your computer:
+
+```bash
+git clone https://github.com/elastic/elasticsearch-py
+cd examples/quotes
+```
+
+### Install the Node and Python dependencies
+
+Run the following command to set up the JavaScript and Python environment and
+install all the dependencies:
+
+```bash
+npm install
+```
+
+### Start a development Elasticsearch container
+
+You can use [start-local](https://www.elastic.co/docs/deploy-manage/deploy/self-managed/local-development-installation-quickstart)
+to start a small Elasticsearch instance.
+
+Use this command to launch the instance (Docker and Docker Compose are required):
+
+```bash
+curl -fsSL https://elastic.co/start-local | sh
+```
+
+Once your Elasticsearch instance is deployed, create an environment variable called
+`ELASTICSEARCH_URL`, making sure it includes the password generated by start-local.
+Example:
+
+```bash
+export ELASTICSEARCH_URL=http://elastic:your-password-here@localhost:9200
+```
+
+### Create the quotes database
+
+Run this command in your terminal:
+
+```bash
+npm run ingest
+```
+
+Note that the `ELASTICSEARCH_URL` variable must be defined in the terminal
+session in which you run this command.
+
+This task should take a minute or less. If a GPU is available, it is used to
+speed up the generation of the embeddings.
+
+### Start the back end
+
+Run this command in your terminal:
+
+```bash
+npm run backend
+```
+
+Note that the `ELASTICSEARCH_URL` variable must be defined in the terminal
+session in which you run this command.
+
+### Start the front end
+
+Open a second terminal window and run this command:
+
+```bash
+npm run dev
+```
+
+You can now navigate to `http://localhost:5173` in your web browser to access
+the application.
diff -pruN 8.17.2-2/examples/quotes/backend/pyproject.toml 9.2.0-1/examples/quotes/backend/pyproject.toml
--- 8.17.2-2/examples/quotes/backend/pyproject.toml	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/backend/pyproject.toml	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,18 @@
+[project]
+name = "quotes"
+version = "0.1"
+dependencies = [
+    "elasticsearch[async]>=9,<10",
+    "fastapi",
+    "orjson",
+    "sentence-transformers",
+    "uvicorn",
+]
+
+[project.optional-dependencies]
+dev = [
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
diff -pruN 8.17.2-2/examples/quotes/backend/quotes.csv 9.2.0-1/examples/quotes/backend/quotes.csv
--- 8.17.2-2/examples/quotes/backend/quotes.csv	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/backend/quotes.csv	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,843 @@
+"quote","author","tags"
+"Two things are infinite: the universe and human stupidity; and I'm not sure about the universe.","Albert Einstein","attributed-no-source,human-nature,humor,infinity,philosophy,science,stupidity,universe"
+"In three words I can sum up everything I've learned about life: it goes on.","Robert Frost","moving-on,learned"
+"If you tell the truth, you don't have to remember anything.","Mark Twain","lies,lying,memory,truth"
+"I've learned that people will forget what you said, people will forget what you did, but people will never forget how you made them feel.","Maya Angelou","61419-likes"
+"There are only two ways to live your life. One is as though nothing is a miracle. The other is as though everything is a miracle.","Albert Einstein","attributed-no-source,inspirational,life,live,miracle,miracles"
+"Good friends, good books, and a sleepy conscience: this is the ideal life.","Mark Twain","books,contentment,friends,friendship,life"
+"Whenever you find yourself on the side of the majority, it is time to reform (or pause and reflect).","Mark Twain","individuality,majority,minority,pause,reflect,wisdom"
+"The man who does not read has no advantage over the man who cannot read.","Mark Twain","aliteracy,attributed-no-source,literacy"
+"Outside of a dog, a book is man's best friend. Inside of a dog it's too dark to read.","Groucho Marx","animals,books,dogs,friends,humor"
+"I am enough of an artist to draw freely upon my imagination. Imagination is more important than knowledge. Knowledge is limited. Imagination encircles the world.","Albert Einstein","1929,imagination,inspirational,viereck-interview"
+"Never put off till tomorrow what may be done day after tomorrow just as well.","Mark Twain","humor,procrastination"
+"If you can't explain it to a six year old, you don't understand it yourself.","Albert Einstein","attributed-no-source,simplicity,understand"
+"Everything you can imagine is real.","Pablo Picasso","art,imagination,inspirational,life"
+"If you want your children to be intelligent, read them fairy tales. If you want them to be more intelligent, read them more fairy tales.","Albert Einstein","attributed-no-source,children,fairy-tales"
+"Logic will get you from A to Z; imagination will get you everywhere.","Albert Einstein","attributed-no-source,imagination"
+"I find television very educating. Every time somebody turns on the set, I go into the other room and read a book.","Groucho Marx","books,humor,reading,television"
+"When one door of happiness closes, another opens; but often we look so long at the closed door that we do not see the one which has been opened for us.","Helen Keller","inspirational"
+"Life is like riding a bicycle. To keep your balance, you must keep moving.","Albert Einstein","balance,moving,bicycle"
+"If everyone is moving forward together, then success takes care of itself.","Henry Ford","together,care,moving"
+"Love is like a beautiful flower which I may not touch, but whose fragrance makes the garden a place of delight just the same.","Helen Keller","flower,beautiful,garden"
+"What's money? A man is a success if he gets up in the morning and goes to bed at night and in between does what he wants to do.","Bob Dylan","morning,money,night"
+"The most beautiful thing we can experience is the mysterious. It is the source of all true art and science.","Albert Einstein","experience,science"
+"You may say I'm a dreamer, but I'm not the only one. I hope someday you'll join us. And the world will live as one.","John Lennon","beatles,connection,dreamers,dreaming,dreams,hope,inspirational,peace"
+"Optimism is the faith that leads to achievement. Nothing can be done without hope and confidence.","Helen Keller","motivational,faith"
+"Love is an irresistible desire to be irresistibly desired.","Robert Frost","love"
+"Love is the flower you've got to let grow.","John Lennon","flower,grow"
+"Success depends upon previous preparation, and without such preparation there is sure to be failure.","Confucius","failure,sure,depends"
+"Imagine all the people living life in peace. You may say I'm a dreamer, but I'm not the only one. I hope someday you'll join us, and the world will be as one.","John Lennon","peace,hope,dreamer"
+"The question isn't who is going to let me; it's who is going to stop me.","Ayn Rand","15880-likes"
+"Coming together is a beginning; keeping together is progress; working together is success.","Henry Ford","together,progress"
+"I have never let my schooling interfere with my education.","Mark Twain","attributed-no-source,education"
+"Try not to become a man of success, but rather try to become a man of value.","Albert Einstein","value,rather"
+"Anyone who has never made a mistake has never tried anything new.","Albert Einstein","attributed-no-source,mistakes"
+"Action is the foundational key to all success.","Pablo Picasso","action,key"
+"The fear of death follows from the fear of life. A man who lives fully is prepared to die at any time.","Mark Twain","death,life"
+"′Classic′ - a book which people praise and don't read.","Mark Twain","books,classic,reading"
+"The biggest adventure you can take is to live the life of your dreams.","Oprah Winfrey","dreams,adventure,biggest"
+"Alcohol may be man's worst enemy, but the bible says love your enemy.","Frank Sinatra","alcohol,bible,drinking,enemies,religion"
+"Happiness lies in the joy of achievement and the thrill of creative effort.","Franklin D. Roosevelt","happiness"
+"All you need in this life is ignorance and confidence, and then success is sure.","Mark Twain","success,ignorance"
+"Love recognizes no barriers. It jumps hurdles, leaps fences, penetrates walls to arrive at its destination full of hope.","Maya Angelou","love"
+"The foundation stones for a balanced success are honesty, character, integrity, faith, love and loyalty.","Zig Ziglar","success,faith,character"
+"Strive not to be a success, but rather to be of value.","Albert Einstein","business,success,value"
+"I've failed over and over and over again in my life and that is why I succeed.","Michael Jordan","success,succeed,again"
+"True art is characterized by an irresistible urge in the creative artist.","Albert Einstein","creative,artist,true"
+"Everyone has inside of him a piece of good news. The good news is that you don't know how great you can be! How much you can love! What you can accomplish! And what your potential is!","Anne Frank","love,good,great"
+"I believe that being successful means having a balance of success stories across the many areas of your life. You can't truly be considered successful in your business life if your home life is in shambles.","Zig Ziglar","home,life,business"
+"I am enough of an artist to draw freely upon my imagination.","Albert Einstein","artist,draw,freely"
+"It is easy to hate and it is difficult to love. This is how the whole scheme of things works. All good things are difficult to achieve; and bad things are very easy to get.","Confucius","good,achieve,hate"
+"Choose a job you love, and you will never have to work a day in your life.","Confucius","work,love,choose"
+"If you have only one smile in you give it to the people you love.","Maya Angelou","valentine's-day"
+"Sculpture is the art of the intelligence.","Pablo Picasso","sculpture"
+"A lie can travel half way around the world while the truth is putting on its shoes.","Mark Twain","misattributed-mark-twain,truth"
+"Only a life lived for others is a life worthwhile.","Albert Einstein","others,worthwhile,lived"
+"Never tell the truth to people who are not worthy of it.","Mark Twain","truth"
+"Success is like reaching an important birthday and finding you're exactly the same.","Audrey Hepburn","birthday,finding,reaching"
+"My great hope is to laugh as much as I cry; to get my work done and try to love somebody and have the courage to accept the love in return.","Maya Angelou","being-loved,courage,laugh,love,loving"
+"The ladder of success is best climbed by stepping on the rungs of opportunity.","Ayn Rand","best,ladder,stepping"
+"Learn from yesterday, live for today, hope for tomorrow. The important thing is not to stop questioning.","Albert Einstein","today,learn,tomorrow"
+"I believe that unarmed truth and unconditional love will have the final word in reality. This is why right, temporarily defeated, is stronger than evil triumphant.","Martin Luther King Jr.","good-and-evil,love,truth"
+"We must accept finite disappointment, but never lose infinite hope.","Martin Luther King Jr.","hope"
+"We have always held to the hope, the belief, the conviction that there is a better life, a better world, beyond the horizon.","Franklin D. Roosevelt","hope,conviction,belief"
+"I simply can't build my hopes on a foundation of confusion, misery and death... I think... peace and tranquillity will return again.","Anne Frank","death,peace,foundation"
+"You can make positive deposits in your own economy every day by reading and listening to powerful, positive, life-changing content and by associating with encouraging and hope-building people.","Zig Ziglar","positive,reading,listening"
+"Youth is easily deceived because it is quick to hope.","Aristotle","youth,deceived,quick"
+"Speak the truth, do not yield to anger; give, if thou art asked for little; by these three steps thou wilt go near the gods.","Confucius","truth,anger,speak"
+"Knowing yourself is the beginning of all wisdom.","Aristotle","introspection,self-discovery,wisdom"
+"Art washes away from the soul the dust of everyday life.","Pablo Picasso","art"
+"A person with a new idea is a crank until the idea succeeds.","Mark Twain","crank,succeeds,until"
+"I would rather walk with a friend in the dark, than alone in the light.","Helen Keller","friendship"
+"Remembering that I'll be dead soon is the most important tool I've ever encountered to help me make the big choices in life. Because almost everything - all external expectations, all pride, all fear of embarrassment or failure - these things just fall away in the face of death, leaving only what is truly important.","Steve Jobs","failure,death,fear"
+"The overwhelming majority of Americans are possessed of two great qualities a sense of humor and a sense of proportion.","Franklin D. Roosevelt","great,qualities,majority"
+"Part of the secret of a success in life is to eat what you like and let the food fight it out inside.","Mark Twain","food,life,fight"
+"Love is a better teacher than duty.","Albert Einstein","teacher,duty"
+"In the flush of love's light, we dare be brave. And suddenly we see that love costs all we are, and will ever be. Yet it is only love which sets us free.","Maya Angelou","light,free,brave"
+"I speak to everyone in the same way, whether he is the garbage man or the president of the university.","Albert Einstein","life,respect"
+"You cannot climb the ladder of success dressed in the costume of failure.","Zig Ziglar","failure,ladder,climb"
+"Keep away from people who try to belittle your ambitions. Small people always do that, but the really great make you feel that you, too, can become great.","Mark Twain","greatness"
+"Your work is going to fill a large part of your life, and the only way to be truly satisfied is to do what you believe is great work. And the only way to do great work is to love what you do. If you haven't found it yet, keep looking. Don't settle. As with all matters of the heart, you'll know when you find it.","Steve Jobs","life,love,work"
+"The beauty of a woman must be seen from in her eyes, because that is the doorway to her heart, the place where love resides.","Audrey Hepburn","beauty,eyes,heart"
+"A person will sometimes devote all his life to the development of one part of his body - the wishbone.","Robert Frost","body,devote,wishbone"
+"People are basically the same the world over. Everybody wants the same things - to be happy, to be healthy, to be at least reasonably prosperous, and to be secure. They want friends, peace of mind, good family relationships, and hope that tomorrow is going to be even better than today.","Zig Ziglar","family,good,peace"
+"How wonderful it is that nobody need wait a single moment before starting to improve the world.","Anne Frank","moment,improve,wait"
+"Nothing is impossible, the word itself says 'I'm possible'!","Audrey Hepburn","impossible,possible"
+"We should measure welfare's success by how many people leave welfare, not by how many are added.","Ronald Reagan","welfare,measure,leave"
+"We've got this gift of love, but love is like a precious plant. You can't just accept it and leave it in the cupboard or just think it's going to get on by itself. You've got to keep watering it. You've got to really look after it and nurture it.","John Lennon","relationship,nurture,gift"
+"I write some country music. There's a song called 'I Hope You Dance.' Incredible. I was going to write that poem; somebody beat me to it.","Maya Angelou","music,dance,song"
+"Painting is just another way of keeping a diary.","Pablo Picasso","painting,diary,keeping"
+"If you find it in your heart to care for somebody else, you will have succeeded.","Maya Angelou","care,heart,succeeded"
+"There is a very fine line between loving life and being greedy for it.","Maya Angelou","greed,life"
+"We love the things we love for what they are.","Robert Frost","love,poetry"
+"I decided, very early on, just to accept life unconditionally; I never expected it to do anything special for me, yet I seemed to accomplish far more than I had ever hoped. Most of the time it just happened to me without my ever seeking it.","Audrey Hepburn","time,accept,special"
+"Count your age by friends, not years. Count your life by smiles, not tears.","John Lennon","happiness,timelessness,wisdom"
+"I remember a specific moment, watching my grandmother hang the clothes on the line, and her saying to me, 'you are going to have to learn to do this,' and me being in that space of awareness and knowing that my life would not be the same as my grandmother's life.","Oprah Winfrey","space,learn,moment"
+"When you're in jail, a good friend will be trying to bail you out. A best friend will be in the cell next to you saying, 'Damn, that was fun'.","Groucho Marx","friends,humor,jail"
+"I believe that a simple and unassuming manner of life is best for everyone, best both for the body and the mind.","Albert Einstein","best,simple,body"
+"When you are courting a nice girl an hour seems like a second. When you sit on a red-hot cinder a second seems like an hour. That's relativity.","Albert Einstein","humor"
+"In a good bookroom you feel in some mysterious way that you are absorbing the wisdom contained in all the books through your skin, without even opening them.","Mark Twain","bookroom,books,libraries,wisdom"
+"Loving someone liberates the lover as well as the beloved. And that kind of love comes with age.","Maya Angelou","age,lover,loving"
+"For the past 33 years, I have looked in the mirror every morning and asked myself: 'If today were the last day of my life, would I want to do what I am about to do today?' And whenever the answer has been 'No' for too many days in a row, I know I need to change something.","Steve Jobs","change,morning,mirror"
+"Never memorize something that you can look up.","Albert Einstein","humor,science"
+"He that lives upon hope will die fasting.","Benjamin Franklin","die,fasting,lives"
+"I always entertain great hopes.","Robert Frost","great,hopes,entertain"
+"Lord save us all from old age and broken health and a hope tree that has lost the faculty of putting out blossoms.","Mark Twain","health,age,tree"
+"Hope is a waking dream.","Aristotle","dreams,hope"
+"In the end, that's what this election is about. Do we participate in a politics of cynicism or a politics of hope?","Barack Obama","politics,election,cynicism"
+"A clever person solves a problem. A wise person avoids it.","Albert Einstein","8292-likes"
+"Reader, suppose you were an idiot. And suppose you were a member of Congress. But I repeat myself.","Mark Twain","humor,politics"
+"Substitute 'damn' every time you're inclined to write 'very;' your editor will delete it and the writing will be just as it should be.","Mark Twain","humor,writing"
+"Science without religion is lame, religion without science is blind.","Albert Einstein","religion,science"
+"I love people who make me laugh. I honestly think it's the thing I like most, to laugh. It cures a multitude of ills. It's probably the most important thing in a person.","Audrey Hepburn","love,honestly,laugh"
+"When someone shows you who they are believe them; the first time.","Maya Angelou","oprah-s-thank-you-game,people"
+"Reality is merely an illusion, albeit a very persistent one.","Albert Einstein","reality"
+"I attribute my success to this - I never gave or took any excuse.","Florence Nightingale","motivational,excuse"
+"Whatever you want to do, if you want to be great at it, you have to love it and be able to make sacrifices for it.","Maya Angelou","great,whatever,sacrifices"
+"Don’t go around saying the world owes you a living. The world owes you nothing. It was here first.","Mark Twain","living,world"
+"I believe in everything until it's disproved. So I believe in fairies, the myths, dragons. It all exists, even if it's in your mind. Who's to say that dreams and nightmares aren't as real as the here and now?","John Lennon","beatles,dragons,dreamers,dreaming,dreams,fairies,faith,mythology,nightmares,reality,truth"
+"Knowledge is love and light and vision.","Helen Keller","knowledge,light,vision"
+"Honesty and integrity are absolutely essential for success in life - all areas of life. The really good news is that anyone can develop both honesty and integrity.","Zig Ziglar","success,good,integrity"
+"Life is what happens while you are busy making other plans.","John Lennon","busy,plans,happens"
+"The more one does and sees and feels, the more one is able to do, and the more genuine may be one's appreciation of fundamental things like home, and love, and understanding companionship.","Amelia Earhart","home,genuine,feels"
+"If we knew what it was we were doing, it would not be called research, would it?","Albert Einstein","science"
+"Don't go around saying the world owes you a living. The world owes you nothing. It was here first.","Mark Twain","owes,living,saying"
+"Faith is taking the first step even when you can't see the whole staircase.","Martin Luther King Jr.","inspirational-faith"
+"The superior man makes the difficulty to be overcome his first interest; success only comes later.","Confucius","overcome,difficulty"
+"I have no special talents. I am only passionately curious.","Albert Einstein","inspirational-life"
+"Good friends, good books and a sleepy conscience: this is the ideal life.","Mark Twain","good,books,conscience"
+"Well, Art is Art, isn't it? Still, on the other hand, water is water. And east is east and west is west and if you take cranberries and stew them like applesauce they taste much more like prunes than rhubarb does. Now you tell me what you know.","Groucho Marx","water,hand,taste"
+"When I was 5 years old, my mother always told me that happiness was the key to life. When I went to school, they asked me what I wanted to be when I grew up. I wrote down ‘happy’. They told me I didn’t understand the assignment, and I told them they didn’t understand life.","John Lennon","inspirational,life"
+"Whether you think you can, or you think you can't--you're right.","Henry Ford","thinking"
+"In the end, we will remember not the words of our enemies, but the silence of our friends.","Martin Luther King Jr.","inspirational"
+"I should have no objection to go over the same life from its beginning to the end: requesting only the advantage authors have, of correcting in a second edition the faults of the first.","Benjamin Franklin","beginning,advantage,second"
+"Think of all the beauty still left around you and be happy.","Anne Frank","anne,beauty,frank,happy"
+"The most important thing is to enjoy your life—to be happy—it's all that matters.","Audrey Hepburn","happiness,life"
+"If a cluttered desk is a sign of a cluttered mind, of what, then, is an empty desk a sign?","Albert Einstein","einstein,human,humor,philosophy,stupidity"
+"Never be bullied into silence. Never allow yourself to be made a victim. Accept no one’s definition of your life; define yourself.","Robert Frost","6617-likes"
+"Everything is clearer when you're in love.","John Lennon","love"
+"I did not attend his funeral, but I sent a nice letter saying I approved of it.","Mark Twain","classic-insult,funeral,funny,humor"
+"What is a friend? A single soul dwelling in two bodies.","Aristotle","friendship,soul"
+"God created war so that Americans would learn geography.","Mark Twain","americans,geography,war"
+"My mission in life is not merely to survive, but to thrive; and to do so with some passion, some compassion, some humor, and some style.","Maya Angelou","life,humor,compassion"
+"Love is the greatest refreshment in life.","Pablo Picasso","life,greatest"
+"We delight in the beauty of the butterfly, but rarely admit the changes it has gone through to achieve that beauty.","Maya Angelou","inspiration"
+"Gravitation is not responsible for people falling in love.","Albert Einstein","love"
+"As usual, there is a great woman behind every idiot.","John Lennon","beatles,men,women"
+"Either write something worth reading or do something worth writing.","Benjamin Franklin","hmmm"
+"If you can't fly then run, if you can't run then walk, if you can't walk then crawl, but whatever you do you have to keep moving forward.","Martin Luther King Jr.","inspirational"
+"If people are good only because they fear punishment, and hope for reward, then we are a sorry lot indeed.","Albert Einstein","good,fear,punishment"
+"I was gratified to be able to answer promptly, and I did. I said I didn’t know.","Mark Twain","humor,knowledge"
+"But who prays for Satan? Who, in eighteen centuries, has had the common humanity to pray for the one sinner that needed it most?","Mark Twain","humor-satan"
+"If money is your hope for independence you will never have it. The only real security that a man will have in this world is a reserve of knowledge, experience, and ability.","Henry Ford","knowledge,money,experience"
+"I love to see a young girl go out and grab the world by the lapels. Life's a bitch. You've got to go out and kick ass.","Maya Angelou","humor,inspirational,life"
+"Never allow someone to be your priority while allowing yourself to be their option.","Mark Twain","heartbreak"
+"The important thing is to not stop questioning. Curiosity has its own reason for existence. One cannot help but be in awe when he contemplates the mysteries of eternity, of life, of the marvelous structure of reality. It is enough if one tries merely to comprehend a little of this mystery each day.—"Old Man's Advice to Youth: 'Never Lose a Holy Curiosity.'"LIFE Magazine (2 May 1955) p. 64","Albert Einstein","1955,curiosity,mystery,physics,science"
+"A youth, when at home, should be filial and, abroad, respectful to his elders. He should be earnest and truthful. He should overflow in love to all and cultivate the friendship of the good. When he has time and opportunity, after the performance of these things, he should employ them in polite studies.","Confucius","love,time,home"
+"No tears in the writer, no tears in the reader. No surprise in the writer, no surprise in the reader.","Robert Frost","reading,writing"
+"Try not to become a man of success. Rather become a man of value.","Albert Einstein","adulthood,success,value"
+"Turn your wounds into wisdom.","Oprah Winfrey","experience,inspirational,pain,wisdom,wounds"
+"I thank God I'm myself and for the life I'm given to live and for friends and lovers and beloveds, and I thank God for knowing that all those people have already paid for me.","Maya Angelou","god,knowing,lovers"
+"Every child is an artist. The problem is how to remain an artist once he grows up.","Pablo Picasso","art,attributed-no-source"
+"Loyalty to country ALWAYS. Loyalty to government, when it deserves it.","Mark Twain","patriotism,politics"
+"Wrinkles should merely indicate where the smiles have been.","Mark Twain","age"
+"Courage is the most important of all the virtues because without courage, you can't practice any other virtue consistently.","Maya Angelou","character,consistency,courage,determination,essence,ethos,fortitude,goodness,inspiration,life-lessons,persistence,resolve,self-reliance,strength,virtue,virtues"
+"Any fool can know. The point is to understand.","Albert Einstein","knowledge,learning,understanding,wisdom"
+"Life would be infinitely happier if we could only be born at the age of eighty and gradually approach eighteen.","Mark Twain","age,born,approach"
+"And I loved Fats Waller. I love his instrumental abilities, his vocal abilities and his sense of humor.","Paul McCartney","humor,loved,ability"
+"The best and most beautiful things in the world cannot be seen or even touched. They must be felt with the heart","Helen Keller","by-anne-sullivan,emotions,feelings"
+"No one wants to die. Even people who want to go to heaven don't want to die to get there. And yet death is the destination we all share. No one has ever escaped it. And that is as it should be, because Death is very likely the single best invention of Life. It is Life's change agent. It clears out the old to make way for the new.","Steve Jobs","death,change,best"
+"Rhetoric may be defined as the faculty of observing in any given case the available means of persuasion. This is not a function of any other art.","Aristotle","persuasion,rhetoric"
+"I don't trust people who don't love themselves and tell me, 'I love you.' ... There is an African saying which is: Be careful when a naked person offers you a shirt.","Maya Angelou","1997,annie-clark-tanner-lecture,inspirational,love,trust"
+"I've missed more than 9000 shots in my career. I've lost almost 300 games. 26 times, I've been trusted to take the game winning shot and missed. I've failed over and over and over again in my life. And that is why I succeed.","Michael Jordan","inspirational"
+"Once you can accept the universe as matter expanding into nothing that is something, wearing stripes with plaid comes easy.","Albert Einstein","funny"
+"Microsoft has had two goals in the last 10 years. One was to copy the Mac, and the other was to copy Lotus' success in the spreadsheet - basically, the applications business. And over the course of the last 10 years, Microsoft accomplished both of those goals. And now they are completely lost.","Steve Jobs","business,goals,lost"
+"Try to look at your weakness and convert it into your strength. That's success.","Zig Ziglar","strength,weakness,convert"
+"A woman's heart should be so hidden in God that a man has to seek Him just to find her.","Maya Angelou","inspirational"
+"What material success does is provide you with the ability to concentrate on other things that really matter. And that is being able to make a difference, not only in your own life, but in other people's lives.","Oprah Winfrey","life,ability,difference"
+"You can't connect the dots looking forward; you can only connect them looking backwards. So you have to trust that the dots will somehow connect in your future. You have to trust in something - your gut, destiny, life, karma, whatever. This approach has never let me down, and it has made all the difference in my life.","Steve Jobs","future,trust,karma"
+"What we have once enjoyed we can never lose. All that we love deeply becomes a part of us.","Helen Keller","lose,becomes,enjoyed"
+"Music was my refuge. I could crawl into the space between the notes and curl my back to loneliness.","Maya Angelou","loneliness,music,refuge"
+"Love many things, for therein lies the true strength, and whosoever loves much performs much, and can accomplish much, and what is done in love is done well.","Vincent Van Gogh","strength,true,accomplish"
+"By three methods we may learn wisdom: First, by reflection, which is noblest; Second, by imitation, which is easiest; and third by experience, which is the bitterest.","Confucius","wisdom"
+"Humor must not professedly teach and it must not professedly preach, but it must do both if it would live forever.","Mark Twain","teach,forever,preach"
+"I met Woz when I was 13, at a friend's garage. He was about 18. He was, like, the first person I met who knew more electronics than I did at that point. We became good friends, because we shared an interest in computers and we had a sense of humor. We pulled all kinds of pranks together.","Steve Jobs","good,computers,together"
+"The purpose of art is washing the dust of daily life off our souls.","Pablo Picasso","life,daily,purpose"
+"If I were not a physicist, I would probably be a musician. I often think in music. I live my daydreams in music. I see my life in terms of music.","Albert Einstein","music"
+"Only in the darkness can you see the stars.","Martin Luther King Jr.","hope,inspirational"
+"No person is your friend who demands your silence, or denies your right to grow.","Alice Walker","silence,friend,grow"
+"The Bush Administration's failure to be consistently involved in helping Israel achieve peace with the Palestinians has been both wrong for our friendship with Israel, as well as badly damaging to our standing in the Arab world.","Barack Obama","failure,peace,achieve"
+"The most difficult thing is the decision to act, the rest is merely tenacity. The fears are paper tigers. You can do anything you decide to do. You can act to change and control your life; and the procedure, the process is its own reward.","Amelia Earhart","change,decision,control"
+"The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.","Albert Einstein","change,deep-thoughts,thinking,world"
+"Be true to the game, because the game will be true to you. If you try to shortcut the game, then the game will shortcut you. If you put forth the effort, good things will be bestowed upon you. That's truly about the game, and in some ways that's about life too.","Michael Jordan","good,effort,game"
+"Art is the lie that enables us to realize the truth.","Pablo Picasso","art,truth"
+"The more I see, the less I know for sure.","John Lennon","beatles,life,perception,truth"
+"I do not fear death. I had been dead for billions and billions of years before I was born, and had not suffered the slightest inconvenience from it.","Mark Twain","death,inspirational"
+"Books are for people who wish they were somewhere else.","Mark Twain","books-reading"
+"Those who educate children well are more to be honored than they who produce them; for these only gave them life, those the art of living well.","Aristotle","greek,mentors,philosophy,teachers"
+"What would men be without women? Scarce, sir...mighty scarce.","Mark Twain","humor,men,women"
+"The secret source of humor is not joy but sorrow; there is no humor in Heaven.","Mark Twain","joy,heaven,sorrow"
+"I know not with what weapons World War III will be fought, but World War IV will be fought with sticks and stones.","Albert Einstein","albert-einstein,future,war,wisdom"
+"Success is liking yourself, liking what you do, and liking how you do it.","Maya Angelou","4051-likes"
+"You never fail until you stop trying.","Albert Einstein","inspirational"
+"We all know that Art is not truth. Art is a lie that makes us realize the truth, at least the truth that is given to us to understand.","Pablo Picasso","truth,lie,understand"
+"Great spirits have always encountered violent opposition from mediocre minds.","Albert Einstein","greatness,opposition,spirit"
+"I did then what I knew how to do. Now that I know better, I do better.","Maya Angelou","attributed,attributed-no-source,education,intelligence,knowledge,unsourced"
+"Some say the world will end in fire,Some say in ice.From what I've tasted of desire,I hold with those who favor fire. But if it had to perish twiceI think I know enough of hateTo say that for destruction iceIs also greatAnd would suffice.","Robert Frost","poetry"
+"Sanity and happiness are an impossible combination.","Mark Twain","happiness,sanity"
+"My mission in life is not merely to survive, but to thrive; and to do so with some passion, some compassion, some humor, and some style","Maya Angelou","3758-likes"
+"A poem begins as a lump in the throat, a sense of wrong, a homesickness, a lovesickness.","Robert Frost","poetry"
+"Clothes make the man. Naked people have little or no influence on society.","Mark Twain","clothing"
+"Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.","Mark Twain","illusions,life"
+"I can shake off everything as I write; my sorrows disappear, my courage is reborn.","Anne Frank","writing"
+"The aim of art is to represent not the outward appearance of things, but their inward significance.","Aristotle","art,philosophy"
+"Our brand of democracy is hard. But I can promise that a year from now, when I no longer hold this office, I'll be right there with you as a citizen - inspired by those voices of fairness and vision, of grit and good humor and kindness that have helped America travel so far.","Barack Obama","travel,good,kindness"
+"If you hear a voice within you say 'you cannot paint,' then by all means paint, and that voice will be silenced.","Vincent Van Gogh","voice,within,paint"
+"From the moment I picked up your book until I put it down, I was convulsed with laughter. Some day I intend reading it.","Groucho Marx","books,humor,reading"
+"The most common way people give up their power is by thinking they don't have any.","Alice Walker","inspirational"
+"I'll always stay connected with Apple. I hope that throughout my life I'll sort of have the thread of my life and the thread of Apple weave in and out of each other, like a tapestry. There may be a few years when I'm not there, but I'll always come back.","Steve Jobs","life,connected,apple"
+"There can be no deep disappointment where there is not deep love.","Martin Luther King Jr.","justice,love,segregation"
+"Have enough courage to trust love one more time and always one more time.","Maya Angelou","courage,love,trust"
+"Achievement of your happiness is the only moral purpose of your life, and that happiness, not pain or mindless self-indulgence, is the proof of your moral integrity, since it is the proof and the result of your loyalty to the achievement of your values.","Ayn Rand","happiness,pain,integrity"
+"If you pick up a starving dog and make him prosperous he will not bite you. This is the principal difference between a dog and man.","Mark Twain","animals,dogs"
+"When I was a boy of 14, my father was so ignorant I could hardly stand to have the old man around. But when I got to be 21, I was astonished at how much the old man had learned in seven years.","Mark Twain","arrogance,humor,ignorance"
+"Everything human is pathetic. The secret source of humor itself is not joy but sorrow. There is no humor in heaven.","Mark Twain","joy,heaven,sorrow"
+"Always do what is right. It will gratify half of mankind and astound the other.","Mark Twain","morals"
+"The artist is a receptacle for emotions that come from all over the place: from the sky, from the earth, from a scrap of paper, from a passing shape, from a spider's web.","Pablo Picasso","sky,earth,emotions"
+"There is more to sex appeal than just measurements. I don't need a bedroom to prove my womanliness. I can convey just as much sex appeal, picking apples off a tree or standing in the rain.","Audrey Hepburn","sex-appeal,women"
+"Education is the ability to listen to almost anything without losing your temper or your self-confidence.","Robert Frost","education"
+"Perfect friendship is the friendship of men who are good, and alike in excellence; for these wish well alike to each other qua good, and they are good in themselves.","Aristotle","good,men,excellence"
+"It is not that I'm so smart. But I stay with the questions much longer.","Albert Einstein","intelligence,learning,wisdom"
+"A woman without a man is like a fish without a bicycle.","Gloria Steinem","feminism,humour"
+"The trouble is not in dying for a friend, but in finding a friend worth dying for.","Mark Twain","3011-likes"
+"The more you praise and celebrate your life, the more there is in life to celebrate.","Oprah Winfrey","happiness,life"
+"One thing you can't hide - is when you're crippled inside.","John Lennon","beatles,emotions,hypocrisy,pain,suffering"
+"Fighting for one's freedom, struggling towards being free, is like struggling to be a poet or a good Christian or a good Jew or a good Muslim or good Zen Buddhist. You work all day long and achieve some kind of level of success by nightfall, go to sleep and wake up the next morning with the job still to be done. So you start all over again.","Maya Angelou","work,morning,freedom"
+"If you don't read the newspaper, you're uninformed. If you read the newspaper, you're mis-informed.","Mark Twain","newspapers"
+"There are millions of Americans outside Washington who are tired of stale political arguments and are moving this country forward. They believe, and I believe, that here in America, our success should depend not on accident of birth, but the strength of our work ethic and the scope of our dreams.","Barack Obama","work,strength,dreams"
+"Creativity is intelligence having fun.","Albert Einstein","2946-likes"
+"Wheresoever you go, go with all your heart.","Confucius","2940-likes"
+"The measure of intelligence is the ability to change.","Albert Einstein","adaptation,flexibility,intelligence,open-mindedness,wisdom"
+"Everything has beauty, but not everyone sees it.","Confucius","beauty,everything,seeing"
+"Learn to value yourself, which means: fight for your happiness.","Ayn Rand","fight,happiness,learn"
+"The world is a dangerous place to live, not because of the people who are evil, but because of the people who don't do anything about it.","Albert Einstein","dangerous,evil,people,world"
+"Courage is resistance to fear, mastery of fear - not absence of fear.","Mark Twain","courage"
+"If we couldn't laugh we would all go insane.","Robert Frost","laughter"
+"You can have it all. Just not all at once.","Oprah Winfrey","dreams,life,time"
+"The best thing to hold onto in life is each other.","Audrey Hepburn","inspirational"
+"Remembering that you are going to die is the best way I know to avoid the trap of thinking you have something to lose. You are already naked. There is no reason not to follow your heart.","Steve Jobs","2740-likes"
+"If A is a success in life, then A equals x plus y plus z. Work is x; y is play; and z is keeping your mouth shut","Albert Einstein","albert,einstein,equations,success"
+"Black holes are where God divided by zero.","Albert Einstein","humor"
+"If I’m honest I have to tell you I still read fairy-tales and I like them best of all.","Audrey Hepburn","fairy-tales"
+"Learn from the mistakes of others. You can never live long enough to make them all yourself.","Groucho Marx","attributed-no-source"
+"Your time is limited, so don't waste it living someone else's life. Don't be trapped by dogma - which is living with the results of other people's thinking. Don't let the noise of others' opinions drown out your own inner voice. And most important, have the courage to follow your heart and intuition.","Steve Jobs","courage,time,heart"
+"You can only become truly accomplished at something you love. Don’t make money your goal. Instead pursue the things you love doing and then do them so well that people can’t take their eyes off of you.","Maya Angelou","2637-likes"
+"Everyone is a moon, and has a dark side which he never shows to anybody.","Mark Twain","darkness,human-nature,moon"
+"Change will not come if we wait for some other person, or if we wait for some other time. We are the ones we've been waiting for. We are the change that we seek.","Barack Obama","change,inspirational"
+"It's not the size of the dog in the fight, it's the size of the fight in the dog.","Mark Twain","bravery,dog,famous,inspirational,valour"
+"Kindness is a language which the deaf can hear and the blind can see.","Mark Twain","inspirational,kindness"
+"There's nowhere you can be that isn't where you're meant to be...","John Lennon","beatles,destiny,fate,life"
+"Never make someone a priority when all you are to them is an option.","Maya Angelou","2570-likes"
+"Nothing in the world is more dangerous than sincere ignorance and conscientious stupidity.","Martin Luther King Jr.","ignorance"
+"The ultimate measure of a man is not where he stands in moments of comfort and convenience, but where he stands at times of challenge and controversy.","Martin Luther King Jr.","challanges"
+"If there is any one secret of success, it lies in the ability to get the other person's point of view and see things from that person's angle as well as from your own.","Henry Ford","ability,view,point"
+"Forgive, O Lord, my little jokes on TheeAnd I'll forgive Thy great big one on me.","Robert Frost","humor,poetry,religion"
+"Procrastination is one of the most common and deadliest of diseases and its toll on success and happiness is heavy.","Wayne Gretzky","happiness,heavy,diseases"
+"Humor is mankind's greatest blessing.","Mark Twain","humor"
+"I've lived through some terrible things in my life, some of which actually happened.","Mark Twain","humor"
+"Tell me and I forget, teach me and I may remember, involve me and I learn.","Benjamin Franklin","learning,mentoring,parenting,teacher"
+"The best way to cheer yourself is to cheer somebody else up.","Albert Einstein","2471-likes"
+"Lack of direction, not lack of time, is the problem. We all have twenty-four hour days.","Zig Ziglar","attention,direction,focus,inspirational,motivational"
+"The secret to humor is surprise.","Aristotle","humor,surprise"
+"Name the greatest of all inventors. Accident.","Mark Twain","inventing-humor"
+"Behind every beautiful thing, there's some kind of pain.","Bob Dylan","beautiful,behind,kind,of,pain,thing"
+"Everything must be made as simple as possible. But not simpler.","Albert Einstein","einstein,paraphrased,science,systems"
+"Humor is the most engaging cowardice.","Robert Frost","cowardice,engaging"
+"Try to be a rainbow in someone's cloud.","Maya Angelou","cloud,rainbow"
+"You believe in a book that has talking animals, wizards, witches, demons, sticks turning into snakes, burning bushes, food falling from the sky, people walking on water, and all sorts of magical, absurd and primitive stories, and you say that we are the ones that need help?","Mark Twain","books-bible"
+"The best way to cheer yourself is to try to cheer someone else up.","Mark Twain","happiness"
+"When you trip over love, it is easy to get up. But when you fall in love, it is impossible to stand again.","Albert Einstein","love"
+"Being the richest man in the cemetery doesn't matter to me. Going to bed at night saying we've done something wonderful... that's what matters to me.","Steve Jobs","2356-likes"
+"I never forget a face, but in your case I'll be glad to make an exception.","Groucho Marx","humor"
+"You may encounter many defeats, but you must not be defeated. In fact, it may be necessary to encounter the defeats, so you can know who you are, what you can rise from, how you can still come out of it.","Maya Angelou","adversity,character,failure,perseverance"
+"Parents can only give good advice or put them on the right paths, but the final forming of a person's character lies in their own hands.","Anne Frank","growing-up,innocence,inspirational,parenting,personal-responsibility,right-of-passage,self-determination,self-responsibility"
+"Humor is reason gone mad.","Groucho Marx","reason,mad,gone"
+"What is right is not always popular and what is popular is not always right.","Albert Einstein","2280-likes"
+"Freedom lies in being bold.","Robert Frost","boldness,courage,freedom"
+"Be thankful for what you have; you'll end up having more. If you concentrate on what you don't have, you will never, ever have enough","Oprah Winfrey","appreciation,contentment,dissatisfaction,life,perception,positivity,thankfulness"
+"Education: the path from cocky ignorance to miserable uncertainty.","Mark Twain","education"
+"If you would be loved, love, and be loveable.","Benjamin Franklin","loved"
+"If I am not good to myself, how can I expect anyone else to be good to me?","Maya Angelou","2202-likes"
+"It does not matter how slowly you go as long as you do not stop.","Confucius","education,inspirational,life,perseverance"
+"Ask for what you want and be prepared to get it!","Maya Angelou","inspirational"
+"You alone are enough. You have nothing to prove to anybody.","Maya Angelou","2179-likes"
+"I was born with an enormous need for affection, and a terrible need to give it.","Audrey Hepburn","born,affection"
+"The easy confidence with which I know another man's religion is folly teaches me to suspect that my own is also.","Mark Twain","humor,philosophy,religion"
+"Adventure is worthwhile in itself.","Amelia Earhart","2126-likes"
+"Intelligence plus character-that is the goal of true education.","Martin Luther King Jr.","education"
+"If everyone demanded peace instead of another television set, then there'd be peace.","John Lennon","activism,beatles,materialism,peace,society,television"
+"The pursuit of truth and beauty is a sphere of activity in which we are permitted to remain children all our lives.","Albert Einstein","wonder"
+"In the middle of difficulty lies opportunity","Albert Einstein","2072-likes"
+"The best way out is always through.","Robert Frost","best"
+"Whoever is careless with the truth in small matters cannot be trusted with important matters","Albert Einstein","character,honesty,integrity"
+"I believe in God, but not as one thing, not as an old man in the sky. I believe that what people call God is something in all of us. I believe that what Jesus and Mohammed and Buddha and all the rest said was right. It's just that the translations have gone wrong.","John Lennon","god,organized-religion,religion,spirituality"
+"I believe in being strong when everything seems to be going wrong. I believe that happy girls are the prettiest girls. I believe that tomorrow is another day, and I believe in miracles","Audrey Hepburn","life"
+"Men marry women with the hope they will never change. Women marry men with the hope they will change. Invariably they are both disappointed.","Albert Einstein","battle-of-the-sexes,gender-stereotypes,marriage,men-and-women"
+"Stepping onto a brand-new path is difficult, but not more difficult than remaining in a situation, which is not nurturing to the whole woman.","Maya Angelou","change,inspirational,self-determination"
+"Poetry is what gets lost in translation.","Robert Frost","poetry"
+"You know you are on the road to success if you would do your job, and not be paid for it.","Oprah Winfrey","road,paid"
+"The secret of life is honesty and fair dealing. If you can fake that, you've got it made.","Groucho Marx","humor,life"
+"The desire to reach for the stars is ambitious. The desire to reach hearts is wise.","Maya Angelou","ambition,inspirational,wisdom"
+"Peace cannot be kept by force; it can only be achieved by understanding.","Albert Einstein","force,peace"
+"Half the world is composed of people who have something to say and can't, and the other half who have nothing to say and keep on saying it.","Robert Frost","new-england-wisdom"
+"He who knows all the answers has not been asked all the questions.","Confucius","wisdom"
+"The secret to getting ahead is getting started.","Mark Twain","starting,working"
+"A question that sometimes drives me hazy: am I or are the others crazy?","Albert Einstein","crazy,question,thought-provoking"
+"If I'm the people's poet, then I ought to be in people's hands - and, I hope, in their heart.","Maya Angelou","heart,hands,ought"
+"Living is Easy with Eyes Closed.","John Lennon","beatles,blindness,ignorance,life"
+"If you make a mistake and do not correct it, this is called a mistake.","Confucius","inspirational,mistake"
+"Happiness depends upon ourselves.","Aristotle","happiness"
+"Two roads diverged in a wood, and I -I took the one less traveled by,And that has made all the difference.","Robert Frost","choice,consequence,individuality,inspirational"
+"You can't blame gravity for falling in love.","Albert Einstein","gravity,blame"
+"All you need in this life is ignorance and confidence; then success is sure.","Mark Twain","humor,success"
+"The best way to not feel hopeless is to get up and do something. Don’t wait for good things to happen to you. If you go out and make some good things happen, you will fill the world with hope, you will fill yourself with hope.","Barack Obama","determination,helping-others,hope,initiative,just-do-it,proactivity,service,volunteerism,work"
+"Imagination is everything. It is the preview of life's coming attractions.","Albert Einstein","creativity,fantasy,imagination,inspirational"
+"Any book that helps a child to form a habit of reading, to make reading one of his deep and continuing needs, is good for him.","Maya Angelou","books,inspirational,library"
+"The highest use of capital is not to make more money, but to make money do more for the betterment of life.","Henry Ford","money,highest,betterment"
+"Educating the mind without educating the heart is no education at all.","Aristotle","education,ethics"
+"It's hard to beat a person who never gives up.","Babe Ruth","hard-work,inspiration,teamwork"
+"Life is really simple, but we insist on making it complicated.","Confucius","simple,insist"
+"Love is composed of a single soul inhabiting two bodies.","Aristotle","bodies,soul,single"
+"There is probably a perverse pride in my administration... that we were going to do the right thing, even if short-term it was unpopular. And I think anybody who's occupied this office has to remember that success is determined by an intersection in policy and politics and that you can't be neglecting of marketing and P.R. and public opinion.","Barack Obama","politics,pride,marketing"
+"It is the supreme art of the teacher to awaken joy in creative expression and knowledge.","Albert Einstein","teacher,knowledge,joy"
+"I am not a teacher, but an awakener.","Robert Frost","carpe-diem,education,inspirational,learning,mentoring"
+"Excellence is an art won by training and habituation. We do not act rightly because we have virtue or excellence, but we rather have those because we have acted rightly. We are what we repeatedly do. Excellence, then, is not an act but a habit.","Aristotle","training,excellence,act"
+"No matter what happens, or how bad it seems today, life does go on, and it will be better tomorrow.","Maya Angelou","inspirational-quotes"
+"Because paper has more patience than people.","Anne Frank","affirming,inspirational,uplifting"
+"Politics is the art of looking for trouble, finding it everywhere, diagnosing it incorrectly and applying the wrong remedies.","Groucho Marx","politics,wrong,looking"
+"Happiness makes up in height for what it lacks in length.","Robert Frost","happiness"
+"Each person must live their life as a model for others.","Rosa Parks","model,others"
+"If the facts don't fit the theory, change the facts.","Albert Einstein","facts,humor,science"
+"The important thing is not to stop questioning. Curiosity has its own reason for existing.","Albert Einstein","curiosity,philosophy,wonder"
+"What I know is, is that if you do work that you love, and the work fulfills you, the rest will come.","Oprah Winfrey","work,rest,fulfills"
+"Eating is so intimate. It's very sensual. When you invite someone to sit at your table and you want to cook for them, you're inviting a person into your life.","Maya Angelou","food,eating,table"
+"A clear conscience is the sure sign of a bad memory.","Mark Twain","conscience,humor,memory"
+"I must be willing to give up what I am in order to become what I will be.","Albert Einstein","inspirational"
+"I haven't any right to criticize books, and I don't do it except when I hate them. I often want to criticize Jane Austen, but her books madden me so that I can't conceal my frenzy from the reader; and therefore I have to stop every time I begin. Every time I read Pride and Prejudice I want to dig her up and beat her over the skull with her own shin-bone.","Mark Twain","austen,criticism,reading,writing"
+"I hope the millions of people I've touched have the optimism and desire to share their goals and hard work and persevere with a positive attitude.","Michael Jordan","attitude,positive,work"
+"There is no abstract art. You must always start with something. Afterward you can remove all traces of reality.","Pablo Picasso","reality,start,abstract"
+"Love is a better master than duty.","Albert Einstein","love"
+"He that can have patience can have what he will.","Benjamin Franklin","determination,inspirational,patience"
+"When I die, I hope to go to Heaven, whatever the Hell that is.","Ayn Rand","death,hell,heaven"
+"Death is no more than passing from one room into another. But there's a difference for me, you know. Because in that other room I shall be able to see.","Helen Keller","blindness,death,faith,inspirational,wisdom"
+"On every front there are clear answers out there that can make this country stronger, but we're going to break through the fear and the frustration people are feeling. Our job is to make sure that even as we make progress, that we are also giving people a sense of hope and vision for the future.","Barack Obama","future,fear,progress"
+"The biggest adventure you can ever take is to live the life of your dreams.","Oprah Winfrey","adventure,bravery,inspirational,life"
+"Why do they always teach us that it's easy and evil to do what we want and that we need discipline to restrain ourselves? It's the hardest thing in the world--to do what we want. And it takes the greatest kind of courage. I mean, what we really want.","Ayn Rand","career,inspiration,life"
+"No one really knows why they are alive until they know what they'd die for.","Martin Luther King Jr.","death,inspirational"
+"A banker is a fellow who lends you his umbrella when the sun is shining, but wants it back the minute it begins to rain.","Mark Twain","bank,bankers,humor"
+"Happiness is the meaning and the purpose of life, the whole aim and end of human existence.","Aristotle","happiness"
+"A Penny Saved is a Penny Earned","Benjamin Franklin","funny,inspirational,money"
+"If someone thinks that peace and love are just a cliche that must have been left behind in the 60s, that's a problem. Peace and love are eternal.","John Lennon","1960s,beatles,love,peace,sixties"
+"It would be possible to describe everything scientifically, but it would make no sense; it would be without meaning, as if you described a Beethoven symphony as a variation of wave pressure.","Albert Einstein","art,cybernetics,science"
+"Your time is limited, so don't waste it living someone else's life. Don't be trapped by dogma - which is living with the results of other people's thinking. Don't let the noise of other's opinions drown out your own inner voice. And most important, have the courage to follow your heart and intuition. They somehow already know what you truly want to become. Everything else is secondary.","Steve Jobs","death,life,r-i-p,steve"
+"The most important thing is to enjoy your life - to be happy - it's all that matters.","Audrey Hepburn","happy,enjoy,matters"
+"Those who are not looking for happiness are the most likely to find it, because those who are searching forget that the surest way to be happy is to seek happiness for others.","Martin Luther King Jr.","happiness"
+"And God help Bruce Springsteen when they decide he's no longer God... They'll turn on him, and I hope he survives it.","John Lennon","god,help,turn"
+"There's a world of difference between truth and facts. Facts can obscure truth.","Maya Angelou","truth"
+"Excellence is never an accident. It is always the result of high intention, sincere effort, and intelligent execution; it represents the wise choice of many alternatives - choice, not chance, determines your destiny.","Aristotle","choice,inspirational"
+"If a man is called to be a street sweeper, he should sweep streets even as a Michaelangelo painted, or Beethoven composed music or Shakespeare wrote poetry. He should sweep streets so well that all the hosts of heaven and earth will pause to say, 'Here lived a great street sweeper who did his job well.","Martin Luther King Jr.","excellence,inspirational"
+"Once I knew only darkness and stillness... my life was without past or future... but a little word from the fingers of another fell into my hand that clutched at emptiness, and my heart leaped to the rapture of living.","Helen Keller","life,future,emptiness"
+"In wine there is wisdom, in beer there is Freedom, in water there is bacteria.","Benjamin Franklin","drinking,humor"
+"A man is a success if he gets up in the morning and gets to bed at night, and in between he does what he wants to do.","Bob Dylan","bob-dylan,music,success"
+"I didn't have time to write a short letter, so I wrote a long one instead.","Mark Twain","humor"
+"The smallest minority on earth is the individual. Those who deny individual rights cannot claim to be defenders of minorities.","Ayn Rand","freedom-of-thought,philosophy,politics,rights"
+"The way a team plays as a whole determines its success. You may have the greatest bunch of individual stars in the world, but if they don't play together, the club won't be worth a dime.","Babe Ruth","sports,team,together"
+"Those are my principles, and if you don't like them...well I have others.","Groucho Marx","humor"
+"Home is the place where, when you have to go there, They have to take you in.","Robert Frost","inspirational-quotes"
+"Many people die at twenty five and aren't buried until they are seventy five.","Benjamin Franklin","death,life,wordplay"
+"Fear not death for the sooner we die, the longer we shall be immortal.","Benjamin Franklin","death"
+"I never made one of my discoveries through the process of rational thinking","Albert Einstein","creativity,epiphany,humor,inspiration"
+"Any emotion, if it is sincere, is involuntary.","Mark Twain","inspirational"
+"I must have a prodigious amount of mind; it takes me as much as a week, sometimes, to make it up!","Mark Twain","decisions,humor,mind"
+"The two most important days in your life are the day you are born and the day you find out why.","Mark Twain","birth,born,day,important,life,reasons-why,true"
+"Once we accept our limits, we go beyond them.","Albert Einstein","inspirational,self-improvement,self-knowledge"
+"Alone we can do so little; together we can do so much","Helen Keller","collaboration,life,teamwork"
+"One morning I shot an elephant in my pajamas. How he got in my pajamas I'll never know.","Groucho Marx","humor"
+"The best revenge is massive success.","Frank Sinatra","best,revenge,massive"
+"My favorite things in life don't cost any money. It's really clear that the most precious resource we all have is time.","Steve Jobs","time,money,precious"
+"We, the People, recognize that we have responsibilities as well as rights; that our destinies are bound together; that a freedom which only asks what's in it for me, a freedom without a commitment to others, a freedom without love or charity or duty or patriotism, is unworthy of our founding ideals, and those who died in their defense.","Barack Obama","patriotism,freedom,died"
+"The rain to the wind said,You push and I'll pelt.'They so smote the garden bedThat the flowers actually knelt,And lay lodged--though not dead.I know how the flowers felt.","Robert Frost","poetry,rain"
+"Our goals can only be reached through a vehicle of a plan, in which we must fervently believe, and upon which we must vigorously act. There is no other route to success.","Pablo Picasso","goals,plan,act"
+"To perceive is to suffer.","Aristotle","empathy,life,suffer,understanding"
+"The funniest people are the saddest ones","Confucius","funny,people,sad"
+"I didn't fail the test, I just found 100 ways to do it wrong.","Benjamin Franklin","examinations,failure,humor"
+"Your time is limited, so don't waste it living someone else's life.","Steve Jobs","life,life-and-living"
+"Don't ever take a fence down until you know why it was put up.","Robert Frost","advice,caution,cautionary,inspirational"
+"Life is a series of experiences, each one of which makes us bigger, even though sometimes it is hard to realize this. For the world was built to develop character, and we must learn that the setbacks and grieves which we endure help us in our marching onward.","Henry Ford","learning,setbacks"
+"Government exists to protect us from each other. Where government has gone beyond its limits is in deciding to protect us from ourselves.","Ronald Reagan","freedom-of-choice,government,inspirational,political-philosophy,politics"
+"Be slow in choosing a friend, slower in changing.","Benjamin Franklin","friend,slow,changing"
+"If a man wants you, nothing can keep him away. If he doesn't want you, nothing can make him stay.","Oprah Winfrey","romance"
+"Well, art is art, isn't it? Still, on the other hand, water is water! And east is east and west is west and if you take cranberries and stew them like applesauce they taste much more like prunes than rhubarb does. Now, uh... now you tell me what you know.","Groucho Marx","art,humor"
+"The educated differ from the uneducated as much as the living differ from the dead.","Aristotle","education"
+"When we remember we are all mad, the mysteries disappear and life stands explained.","Mark Twain","remember,mad,explained"
+"I used to think anyone doing anything weird was weird. Now I know that it is the people that call others weird that are weird.","Paul McCartney","beatles,humor,music,nonjudgemental"
+"I have a dream that my four little children will one day live in a nation where they will not be judged by the color of their skin but by the content of their character.","Martin Luther King Jr.","character,dream,inspirational,prejudice,race"
+"Imagination is the highest form of research.","Albert Einstein","inspiration"
+"Wherever you go, go with all your heart.","Confucius","confucius,inspirational"
+"Life is one grand, sweet song, so start the music.","Ronald Reagan","music,song,start"
+"We must live together as brothers or perish together as fools.","Martin Luther King Jr.","brotherhood,inspirational,wisdom"
+"If a black cat crosses your path, it signifies that the animal is going somewhere.","Groucho Marx","humor,superstition"
+"Science investigates; religion interprets. Science gives man knowledge, which is power; religion gives man wisdom, which is control. Science deals mainly with facts; religion deals mainly with values. The two are not rivals.","Martin Luther King Jr.","religion,science"
+"I heard a definition once: Happiness is health and a short memory! I wish I'd invented it, because it is very true.","Audrey Hepburn","happiness,inspirational"
+"The most incomprehensible thing about the world is that it is at all comprehensible.","Albert Einstein","einstein,philosophy,science"
+"I sustain myself with the love of family.","Maya Angelou","family,inspirational"
+"Some people claim that marriage interferes with romance. There's no doubt about it. Anytime you have a romance, your wife is bound to interfere.","Groucho Marx","marriage,romance,sex"
+"Learn the rules like a pro, so you can break them like an artist.","Pablo Picasso","art,creativity,motivational,picasso,rules,writing"
+"I made my first white women friends in college; they loved me and were loyal to our friendship, but I understood, as they did, that they were white women and that whiteness mattered.","Alice Walker","women,college,loved"
+"I've come to believe that each of us has a personal calling that's as unique as a fingerprint - and that the best way to succeed is to discover what you love and then find a way to offer it to others in the form of service, working hard, and also allowing the energy of the universe to lead you.","Oprah Winfrey","individuality,inspirational,success"
+"Everyone here has the sense that right now is one of those moments when we are influencing the future.","Steve Jobs","future,moments"
+"I trust that everything happens for a reason, even if we are not wise enough to see it.","Oprah Winfrey","inspirational"
+"Anyone who stops learning is old, whether at twenty or eighty. Anyone who keeps learning stays young.","Henry Ford","education,lifelong-learning"
+"All I can be is me- whoever that is.","Bob Dylan","bob-dylan,inspirational"
+"The Bible has noble poetry in it... and some good morals and a wealth of obscenity, and upwards of a thousand lies.","Mark Twain","agnosticism,atheism,bible,christianity,mythology,truth"
+"Only those who attempt the absurd can achieve the impossible.","Albert Einstein","achievement,inspirational,optimism"
+"Limitless undying love which shines around me like a million suns it calls me on and on across the universe.","John Lennon","beatles,love,sun,universe"
+"The people who make art their business are mostly imposters.","Pablo Picasso","business,mostly"
+"First best is falling in love. Second best is being in love. Least best is falling out of love. But any of it is better than never having been in love.","Maya Angelou","falling-in-love,love,lovelessness"
+"Breathe. Let go. And remind yourself that this very moment is the only one you know you have for sure.","Oprah Winfrey","life"
+"I'm happy to be a writer - of prose, poetry, every kind of writing. Every person in the world who isn't a recluse, hermit or mute uses words. I know of no other art form that we always use.","Maya Angelou","poetry,happy,words"
+"The dog is a gentleman; I hope to go to his heaven not man's.","Mark Twain","animals,dogs,heaven,inspirational,man,religion"
+"Every strike brings me closer to the next home run.","Babe Ruth","baseball,perseverance,philosophy,sports"
+"As my sufferings mounted I soon realized that there were two ways in which I could respond to my situation -- either to react with bitterness or seek to transform the suffering into a creative force. I decided to follow the latter course.","Martin Luther King Jr.","activism,healing,inspirational,justice,sacrifice"
+"Don’t let the noise of others’ opinions drown out your own inner voice."[Stanford University commencement speech, 2005]","Steve Jobs","apple-computer-inc,identity,independence,inner-voice,life,steve-jobs"
+"When we give cheerfully and accept gratefully, everyone is blessed.","Maya Angelou","charity,compassion,gratitude,helping-others,inspiration"
+"Reading, after a certain age, diverts the mind too much from its creative pursuits. Any man who reads too much and uses his own brain too little falls into lazy habits of thinking.","Albert Einstein","reading,science"
+"There are two kinds of teachers: the kind that fill you with so much quail shot that you can't move, and the kind that just gives you a little prod behind and you jump to the skies.","Robert Frost","inspirational,teachers"
+"Men should think twice before making widowhood women's only path to power.","Gloria Steinem","death,feminism,men,murder,widowhood,women,womens-rights"
+"To be a poet is a condition, not a profession.","Robert Frost","afflictions,on-writing,poetry"
+"How many observe Christ's birthday! How few, His precepts!","Benjamin Franklin","christian-behavior,christmas,religion"
+"He is free to evade reality, he is free to unfocus his mind and stumble blindly down any road he pleases, but not free to avoid the abyss he refuses to see.","Ayn Rand","inspirational"
+"Memories of our lives, of our works and our deeds will continue in others.","Rosa Parks","memories,others"
+"Do not worry about your difficulties in Mathematics. I can assure you mine are still greater.","Albert Einstein","inspirational"
+"Life is either a great adventure or nothing.","Helen Keller","great,adventure"
+"Hide not your talents, they for use were made,What's a sundial in the shade?","Benjamin Franklin","talent,wisdom,work"
+"Student: Dr. Einstein, Aren't these the same questions as last year's [physics] final exam?Dr. Einstein: Yes; But this year the answers are different.","Albert Einstein","humor,science"
+"You cannot open a book without learning something.","Confucius","books,learning,philosopher"
+"It takes a very long time to become young.","Pablo Picasso","ignorance,innocence,knowledge,wisdom,youth"
+"Without leaps of imagination or dreaming, we lose the excitement of possibilities. Dreaming, after all is a form of planning.","Gloria Steinem","dreaming,dreams,excitement,imagination,inspirational,planning,possibility"
+"A Woman in harmony with her spirit is like a river flowing.She goes where she will without pretense and arrives at her destination prepared to be herself and only herself","Maya Angelou","inspirational"
+"We can't help everyone, but everyone can help someone.","Ronald Reagan","help"
+"Wisdom is not a product of schooling but of the lifelong attempt to acquire it.","Albert Einstein","education,knowledge-wisdom,learning,school,science,wisdom"
+"Well done is better than well said.","Benjamin Franklin","inspirational"
+"Anyone who says he can see through women is missing a lot.","Groucho Marx","funny,innuendo"
+"What the superior man seeks is in himself; what the small man seeks is in others.","Confucius","individuality,inspiration,validation"
+"Surround yourself only with people who are going to take you higher.","Oprah Winfrey","inspirational"
+"Whoever is happy will make others happy too.","Anne Frank","happy,others,whoever"
+"Sometimes it's not enough to know what things mean, sometimes you have to know what things don't mean.","Bob Dylan","understanding,wisdom"
+"When you see a good person, think of becoming like her/him. When you see someone not so good, reflect on your own weak points.","Confucius","helpful,inspirational,introspection,karma,self-improvement"
+"If we ever forget that we're one nation under God, then we will be one nation gone under.","Ronald Reagan","inspirational,religon,separation-of-church-and-state"
+"Close your eyes and I'll kiss you, Tomorrow I'll miss you.","Paul McCartney","beatles,kiss,love,miss-you,music,song-lyrics,tomorrow"
+"We all live with the objective of being happy; our lives are all different and yet the same.","Anne Frank","happiness,literature"
+"the time is always right to do the right thing","Martin Luther King Jr.","inspirational"
+"Rich people have small TVs and big libraries, and poor people have small libraries and big TVs.","Zig Ziglar","intelligence,success,wealth"
+"Deliver me from writers who say the way they live doesn't matter. I'm not sure a bad person can write a good book. If art doesn't make us better, then what on earth is it for.","Alice Walker","art,bad,good,writing"
+"I've been lucky. Opportunities don't often come along. So, when they do, you have to grab them.","Audrey Hepburn","inspirational"
+"The truth is not for all men but only for those who seek it.","Ayn Rand","truth"
+"God is a concept by which we measure our pain.","John Lennon","beatles,god,religion"
+"How can I go forward when I don't know which way I'm facing?","John Lennon","life"
+"Love is a promise, love is a souvenir, once given never forgotten, never let it disappear.","John Lennon","attributed-no-source,love"
+"Two roads diverged in a wood and I - I took the one less traveled by, and that has made all the difference.","Robert Frost","roads,difference"
+"I'm not afraid of death because I don't believe in it.It's just getting out of one car, and into another.","John Lennon","death,spirituality"
+"If there is any religion that could respond to the needs of modern science, it would be Buddhism.","Albert Einstein","attributed,attributed-to-einstein-no-source,buddhism,religion,science,unsourced"
+"Instead of cursing the darkness, light a candle.","Benjamin Franklin","inspirational,proverb"
+"Freedom is never more than one generation away from extinction. We didn't pass it to our children in the bloodstream. It must be fought for, protected, and handed on for them to do the same, or one day we will spend our sunset years telling our children and our children's children what it was once like in the United States where men were free.","Ronald Reagan","freedom,inspirational,personal-freedom"
+"I am always doing that which I can not do, in order that I may learn how to do it.","Pablo Picasso","inspirational"
+"He who hath many friends hath none.","Aristotle","hath,none"
+"When you encourage others, you in the process are encouraged because you're making a commitment and difference in that person's life. Encouragement really does make a difference.","Zig Ziglar","commitment,process"
+"Few people are capable of expressing with equanimity opinions which differ from the prejudices of their social enviroment. Most people are incapable of forming such opinions."(Essay to Leo Baeck, 1953)","Albert Einstein","disagreement,dissent,equanimity,expressing,knowledge,opinions,prejudices,social,society,thought"
+"If you live each day as it was your last, someday you'll most certainly be right","Steve Jobs","death,life,rip,steve"
+"Happy girls are the prettiest","Audrey Hepburn","attractiveness,beauty,happiness"
+"All great achievements require time.","Maya Angelou","inspirational,motivation"
+"Some painters transform the sun into a yellow spot, others transform a yellow spot into the sun.","Pablo Picasso","sun,yellow,others"
+"I want to thank you, Lord, for life and all that's in it. Thank you for the day and for the hour, and the minute.","Maya Angelou","inspirational"
+"Eat a live frog first thing in the morning and nothing worse will happen to you the rest of the day.","Mark Twain","humor"
+"On some positions, cowardice asks the question, is it expedient? And then expedience comes along and asks the question, is it politic? Vanity asks the question, is it popular? Conscience asks the question, is it right?There comes a time when one must take the position that is neither safe nor politic nor popular, but he must do it because conscience tells him it is right.","Martin Luther King Jr.","truth"
+"I cannot imagine a God who rewards and punishes the objects of his creation, whose purposes are modeled after our own -- a God, in short, who is but a reflection of human frailty. Neither can I believe that the individual survives the death of his body, although feeble souls harbor such thoughts through fear or ridiculous egotisms.","Albert Einstein","god,rational,religion,thinking"
+"Nothing will work unless you do.","Maya Angelou","inspirational"
+"The heart can think of no devotionGreater than being shore to the ocean-Holding the curve of one position,Counting an endless repetition.","Robert Frost","devotion,love,poetry"
+"Just give me a comfortable couch, a dog, a good book, and a woman. Then if you can get the dog to go somewhere and read the book, I might have a little fun.","Groucho Marx","dogs,humor,innuendo,naughty"
+"If all printers were determined not to print anything till they were sure it would offend nobody, there would be very little printed.","Benjamin Franklin","books,censorship,reading"
+"I love the song 'I Hope You Dance' by Lee Ann Womack. I was going to write that song, but someone beat me to it.","Maya Angelou","love,dance,song"
+"The way to know life is to love many things.","Vincent Van Gogh","life"
+"I believe in intuitions and inspirations...I sometimes FEEL that I am right. I do not KNOW that I am.","Albert Einstein","certainty,faith,intuition"
+"Yes We Can!","Barack Obama","action,hope,positivity"
+"I've had a lot of worries in my life, most of which never happened.","Mark Twain","inspirational,positive,worries"
+"Never bend your head. Hold it high. Look the world straight in the eye.","Helen Keller","inspirational"
+"I long to accomplish a great and noble task, but it is my chief duty to accomplish small tasks as if they were great and noble.","Helen Keller","inspirational"
+"Life is more or less a lie, but then again, that's exactly the way we want it to be.","Bob Dylan","dylan,life,zen"
+"The high-minded man must care more for the truth than for what people think.","Aristotle","truth"
+"My religion consists of a humble admiration of the illimitable superior spirit who reveals himself in the slight details we are able to perceive with our frail and feeble mind.","Albert Einstein","god,religion,spirit,spirituality"
+"It is not the failure of others to appreciate your abilities that should trouble you, but rather your failure to appreciate theirs.","Confucius","inspirational"
+"The man of wisdom is never of two minds;the man of benevolence never worries;the man of courage is never afraid.","Confucius","wisdom"
+"Success is the maximum utilization of the ability that you have.","Zig Ziglar","ability,maximum"
+"I've learned that you shouldn't go through life with a catcher's mitt on both hands; you need to be able to throw some things back.","Maya Angelou","wisdom"
+"If at first the idea is not absurd, then there is no hope for it.","Albert Einstein","absurd,absurdity,hope,idea"
+"Love is the only force capable of transforming an enemy to a friend.","Martin Luther King Jr.","love"
+"Simple can be harder than complex: You have to work hard to get your thinking clean to make it simple. But it’s worth it in the end because once you get there, you can move mountains.","Steve Jobs","complex,hard,simple,thinking,truth,wisdom,worth"
+"The way out is through the door. Why is it that no one will use this method?","Confucius","tao,wisdom,zen"
+"Stay hungry. Stay foolish.","Steve Jobs","inspirational"
+"When you do something noble and beautiful and nobody noticed, do not be sad. For the sun every morning is a beautiful spectacle and yet most of the audience still sleeps.","John Lennon","beatles,beauty,life,nature,sadness"
+"I have left orders to be awakened at any time during national emergency, even if I'm in a cabinet meeting.","Ronald Reagan","growing-older,humor,napping,politics"
+"The tragedy of life is what dies inside a man while he lives.","Albert Einstein","inspirational,life"
+"One thing I have learned in a long life: that all our science, measured against reality, is primitive and childlike -- and yet it is the most precious thing we have.","Albert Einstein","science"
+"He not busy being born is busy dying.","Bob Dylan","inspirational,lyrics,music,self-discovery,songs"
+"Earl Nightingale has inspired more people toward success and fortune than any other motivational speaker on the planet.","Zig Ziglar","inspired,planet,fortune"
+"People don’t like to think, if one thinks, one must reach conclusions. Conclusions are not always pleasant.","Helen Keller","life,thinking"
+"Life is a whim of several billion cells to be you for a while","Groucho Marx","humor"
+"The first question which the priest and the Levite asked was: 'If I stop to help this man, what will happen to me?' But...the good Samaritan reversed the question: 'If I do not stop to help this man, what will happen to him?","Martin Luther King Jr.","love,neighbor,sacrifice,service"
+"In all the world, there is no heart for me like yours.In all the world, there is no love for you like mine.","Maya Angelou","love,perfect-fit"
+"Give a bowl of rice to a man and you will feed him for a day. Teach him how to grow his own rice and you will save his life.","Confucius","education,enduring-good,generosity,life,wise-advice,work"
+"The Revolution introduced me to art, and in turn, art introduced me to the Revolution!","Albert Einstein","communism,democracy,freedom,history,hope,individualism,philosophy,politics,revolution,socialism"
+"I have gained this by philosophy … I do without being ordered what some are constrained to do by their fear of the law.","Aristotle","law,morality,philosophy,secular-ethics,secular-morality"
+"Intellectual growth should commence at birth and cease only at death.","Albert Einstein","inspirational,lifelong-learning"
+"The hardest thing of all is to find a black cat in a dark room, especially if there is no cat.","Confucius","futility,knowledge,religion"
+"Only the wisest and stupidest of men never change.","Confucius","wisdom"
+"Part of me suspects that I'm a loser, and the other part of me thinks I'm God Almighty.","John Lennon","ego,god,life,loser"
+"Sometimes life hits you in the head with a brick. Don't lose faith.","Steve Jobs","faith,brick,head"
+"We know from daily life that we exist for other people first of all, for whose smiles and well-being our own happiness depends.","Albert Einstein","friendship,life,relationships"
+"Don't find fault, find a remedy; anybody can complain","Henry Ford","common-sense,inspirational"
+"Your success and happiness lies in you. Resolve to keep happy, and your joy and you shall form an invincible host against difficulties.","Helen Keller","new-year's,happiness"
+"What I'm looking for is not out there, it is in me.","Helen Keller","self,truth"
+"There is no limit to the amount of good you can do if you don't care who gets the credit.","Ronald Reagan","accomplishment,achievement,inspirational,misattributed,modesty,recognition"
+"I was married by a judge. I should have asked for a jury.","Groucho Marx","humor,marriage"
+"Even on the most solemn occasions I got away without wearing socks and hid that lack of civilization in high boots","Albert Einstein","fashion,humor"
+"We dance round in a ring and suppose,But the Secret sits in the middle and knows.","Robert Frost","inspirational"
+"The most beautiful thing we can experience is the mysterious. It is the source of all true art and all science. He to whom this emotion is a stranger, who can no longer pause to wonder and stand rapt in awe, is as good as dead: his eyes are closed.","Albert Einstein","art,awe,einstein,emotion,eyes,life,mysterious,reality,science,the-mysterious,truth,vision"
+"The ideals which have always shone before me and filled me with joy are goodness, beauty, and truth.","Albert Einstein","beauty,ideas,kindness,truth"
+"The arc of the moral universe is long, but it bends towards justice.","Martin Luther King Jr.","inspirational,justice,mlk"
+"If I cannot do great things, I can do small things in a great way","Martin Luther King Jr.","excellence,success"
+"You don't need anybody to tell you who you are or what you are. You are what you are!","John Lennon","be-yourself,inspirational,self-awareness,self-determination"
+"The word 'God' is for me nothing more than the expression and product of human weaknesses, the Bible a collection of honorable, but still primitive legends which are nevertheless pretty childish. No interpretation, no matter how subtle, can (for me) change this.","Albert Einstein","bible,god,religion"
+"You can't connect the dots looking forward; you can only connect them looking backwards. So you have to trust that the dots will somehow connect in your future.","Steve Jobs","design,inspirational,life-lessons"
+"God is subtle but he is not malicious.","Albert Einstein","inspirational"
+"To write well, express yourself like the common people, but think like a wise man.","Aristotle","inspirational,philosophy,writing"
+"An individual has not started living until he can rise above the narrow confines of his individualistic concerns to the broader concerns of all humanity.","Martin Luther King Jr.","inspirational"
+"The person who deserves most pity is a lonesome one on a rainy day who doesn't know how to read.","Benjamin Franklin","books,illiteracy,literature,pity,reading,words"
+"No, this trick won't work... How on earth are you ever going to explain in terms of chemistry and physics so important a biological phenomenon as first love?","Albert Einstein","chemistry,first-love,love,physics"
+"Homer has taught all other poets the art of telling lies skillfully.","Aristotle","homer,poets,lies"
+"Let us live so that when we come to die even the undertaker will be sorry.","Mark Twain","inspirational"
+"Next time I see you, remind me not to talk to you.","Groucho Marx","gibe,humor,insult"
+"What is Man? Man is a noisome bacillus whom Our Heavenly Father created because he was disappointed in the monkey.","Mark Twain","creation,evolution,humor,mankind"
+"Information is not knowledge.","Albert Einstein","information,knowledge"
+"Literature is my Utopia","Helen Keller","books,literature,reading,words"
+"I have looked in the mirror every morning and asked myself: "If today were the last day of my life, would I want to do what I am about to do today?"And whenever the answer has been "No"for too many days in a row, I know I need to change something.","Steve Jobs","change,life"
+"We could never learn to be brave and patient if there were only joy in the world","Helen Keller","adversity,inspirational,personal-growth"
+"Challenges are gifts that force us to search for a new center of gravity. Don't fight them. Just find a new way to stand.","Oprah Winfrey","philosophy"
+"In the unlikely story that is America, there has never been anything false about hope.","Barack Obama","america,history,hope"
+"Possessions, outward success, publicity, luxury - to me these have always been contemptible. I believe that a simple and unassuming manner of life is best for everyone, best for both the body and the mind.","Albert Einstein","belongings,simplicity,success,values"
+"I believe that every single event in life happens in an opportunity to choose love over fear.","Oprah Winfrey","fear,love,opportunity"
+"I'm not the smartest fellow in the world, but I can sure pick smart colleagues.","Franklin D. Roosevelt","funny,humor,politics,usa"
+"Writing is the only thing that when I do it, I don't feel I should be doing something else.","Gloria Steinem","writing"
+"Forget injuries, never forget kindnesses.","Confucius","life,principles"
+"Happiness does not come from without, it comes from within","Helen Keller","happiness,life,love"
+"A bend in the road is not the end of the road…Unless you fail to make the turn.","Helen Keller","change,life,perseverance,risk"
+"Dancers are the athletes of God.","Albert Einstein","albert,life"
+"You can never be wise and be in love at the same time.","Bob Dylan","bob-dylan,love"
+"Room service? Send up a larger room."[A Night at the Opera]","Groucho Marx","hotel-rooms,hotels,humor,room-service"
+"Under certain circumstances, profanity provides a relief denied even to prayer."[Mark Twain, a Biography]","Mark Twain","humor,prayer,profanity,relief,you-ll-completely-understand-why"
+"Wit is educated insolence.","Aristotle","definitions,humor"
+"Life isn't worth living, unless it is lived for someone else.","Albert Einstein","einstein,life"
+"Everything is determined, the beginning as well as the end, by forces over which we have no control. It is determined for the insect, as well as for the star. Human beings, vegetables, or cosmic dust, we all dance to a mysterious tune, intoned in the distance by an invisible piper.","Albert Einstein","destiny,einstein,inspirational"
+"Early to bed and early to rise makes a man healthy, wealthy, and wise.","Benjamin Franklin","inspirational"
+"It is, in fact, nothing short of a miracle that the modern methods of instruction have not yet entirely strangled the holy curiosity of inquiry; for this delicate little plant, aside from stimulation, stands mainly in need of freedom. Without this it goes to wrack and ruin without fail.","Albert Einstein","creativity,education,learning,regimentation"
+"Everything has its wonders, even darkness and silence, and I learn, whatever state I may be in, therein to be content","Helen Keller","life"
+"I want to do it because I want to do it. Women must try to do things as men have tried. When they fail, their failure must be but a challenge to others.","Amelia Earhart","failure,success,women,work"
+"I count him braver who overcomes his desires than him who overcomes his enemies.","Aristotle","ethics,philosophy,self-discovery"
+"The reports of my death are greatly exaggerated.","Mark Twain","death,humor"
+"Let us not seek to satisfy our thirst for freedom by drinking from the cup of bitterness and hatred.","Martin Luther King Jr.","communism,democracy,freedom,liberation,love,marxism,peace,radicalism,socialism,theology"
+"A successful book is not made of what is in it, but what is left out of it.","Mark Twain","books,humor,on-writing,wit,writing"
+"We ran as if to meet the moon.","Robert Frost","moon,poetry,robert-frost"
+"We all know that Art is not truth. Art is a lie that makes us realize truth at least the truth that is given us to understand. The artist must know the manner whereby to convince others of the truthfulness of his lies.","Pablo Picasso","art,lies,truth"
+"When I was younger, I could remember anything, whether it had happened or not; but my faculties are decaying now and soon I shall be so I cannot remember any but the things that never happened. It is sad to go to pieces like this but we all have to do it.","Mark Twain","aging,humor"
+"I'm inspired by the people I meet in my travels--hearing their stories, seeing the hardships they overcome, their fundamental optimism and decency. I'm inspired by the love people have for their children. And I'm inspired by my own children, how full they make my heart. They make me want to work to make the world a little bit better. And they make me want to be a better man.","Barack Obama","family,hope,service"
+"You must expect great things of yourself before you can do them.","Michael Jordan","inspirational,success"
+"Many persons have a wrong idea of what constitutes true happiness. It is not attained through self-gratification but through fidelity to a worthy purpose.","Helen Keller","educator,inspirational"
+"The way a crowShook down on meThe dust of snowFrom a hemlock treeHas given my heartA change of moodAnd saved some partOf a day I had rued.","Robert Frost","poetry"
+"God is really only another artist. He invented the giraffe, the elephant and the cat. He has no real style, He just goes on trying other things.","Pablo Picasso","art,god"
+"The thing the sixties did was to show us the possibilities and the responsibility that we all had. It wasn't the answer. It just gave us a glimpse of the possibility.","John Lennon","1960s,beatles,hope,possibility,responsibility"
+"And were an epitaph to be my story I'd have a short one ready for my own. I would have written of me on my stone: I had a lover's quarrel with the world.","Robert Frost","life"
+"When obstacles arise, you change your direction to reach your goal; you do not change your decision to get there.","Zig Ziglar","inspirational"
+"Training is everything. The peach was once a bitter almond; cauliflower is nothing but cabbage with a college education.","Mark Twain","education,food"
+"Love is like a virus. It can happen to anybody at any time.","Maya Angelou","love"
+"Vision without execution is just hallucination.","Henry Ford","dreams,success"
+"My life isn’t theories and formulae. It’s part instinct, part common sense. Logic is as good a word as any, and I’ve absorbed what logic I have from everything and everyone… from my mother, from training as a ballet dancer, from Vogue magazine, from the laws of life and health and nature.","Audrey Hepburn","audrey-hepburn,inspiration,life"
+"Any customer can have a car painted any colour that he wants so long as it is black.","Henry Ford","black,customer,humor,model-t"
+"Play is the highest form of research.","Albert Einstein","childhood,education"
+"We can't become what we need to be by remaining what we are.","Oprah Winfrey","life"
+"It is by the goodness of god that in our country we have those 3 unspeakably precious things: freedom of speech, freedom of conscience, and the prudence never to practice either of them.","Mark Twain","humor,politics"
+"To those who are given much, much is expected.","Maya Angelou","inspirational-quotes"
+"Books can not be killed by fire. People die, but books never die. No man and no force can abolish memory... In this war, we know, books are weapons. And it is a part of your dedication always to make them weapons for man's freedom.","Franklin D. Roosevelt","book-burning,books,censorship,freedom"
+"Relationships are like Rome -- difficult to start out, incredible during the prosperity of the 'golden age', and unbearable during the fall. Then, a new kingdom will come along and the whole process will repeat itself until you come across a kingdom like Egypt... that thrives, and continues to flourish. This kingdom will become your best friend, your soul mate, and your love.","Helen Keller","love,relationship"
+"Our society is run by insane people for insane objectives","John Lennon","truth"
+"I know in my heart that man is good, that what is right will always eventually triumph, and there is purpose and worth to each and every life.","Ronald Reagan","inspirational"
+"Before I speak, I have something important to say.","Groucho Marx","absurdist,humor"
+"If you lose hope, somehow you lose the vitality that keeps moving, you lose that courage to be, that quality that helps you go on in spite of it all. And so today I still have a dream.","Martin Luther King Jr.","hope"
+"Time flows away like the water in the river.","Confucius","wisdom"
+"Self-esteem comes from being able to define the world in your own terms and refusing to abide by the judgments of others.","Oprah Winfrey","inspirational"
+"An empty stomach is not a good political adviser.","Albert Einstein","humor,hunger,poverty"
+"A foolish faith in authority is the worst enemy of truth.","Albert Einstein","dissent,independent-thought,science"
+"My life has been one great big joke,A dance that's walked,A song that's spoke,I laugh so hard I almost choke,When I think about myself.","Maya Angelou","life"
+"When red-headed people are above a certain social grade their hair is auburn.","Mark Twain","class,elitism,humor,redheads"
+"A poem begins with a lump in the throat; a homesickness or a love sickness. It is a reaching-out toward expression; an effort to find fulfillment. A complete poem is one where an emotion has found its thought and the thought has found words.","Robert Frost","poetry,writing"
+"Education breeds confidence. Confidence breeds hope. Hope breeds peace.","Confucius","education"
+"Poetry puts starch in your backbone so you can stand, so you can compose your life.","Maya Angelou","poetry"
+"It frightens me, the awful truth, of how sweet life can be...","Bob Dylan","fear,life,truth"
+"Have you ever felt the longing for someone you could admire? For something, not to look down at, but up to?","Ayn Rand","achievement,admiration,atlas-shrugged,inspirational,love,objectivism"
+"Politics is not a bad profession. If you succeed there are many rewards, if you disgrace yourself you can always write a book.","Ronald Reagan","humor,politics"
+"Shall I teach you what knowledge?When you know a thing, say that you know it;when you do not know a thing,admit that you do not know it.That is knowledge","Confucius","knowledge"
+"My best friend is the man who in wishing me well wishes it for my sake.","Aristotle","best,friend,wishes"
+"Make today worth remembering.","Zig Ziglar","inspirational,motivational"
+"I've heard that hard work never killed anyone, but I say why take the chance?","Ronald Reagan","humor,laziness,sloth,work"
+"I was feeling insecure you might not love me anymore","John Lennon","insecure,insecurity,love"
+"Democracy cannot succeed unless those who express their choice are prepared to choose wisely. The real safeguard of democracy, therefore, is education.","Franklin D. Roosevelt","education,government,political-philosophy,public-duty,voting"
+"Life is very short, and there's no time for fussing and fighting my friends","John Lennon","inspirational"
+"Happiness is not in the mere possession of money; it lies in the joy of achievement, in the thrill of creative effort.","Franklin D. Roosevelt","achievement,happiness,wealth,wisdom"
+"Doing the best at this moment puts you in the best place for the mext moment","Oprah Winfrey","inspirational,life,success"
+"The lack of money is the root of all evil.","Mark Twain","humor"
+"The human spirit must prevail over technology.","Albert Einstein","humanity,science"
+"Status quo, you know, is Latin for 'the mess we're in'.","Ronald Reagan","inspirational,latin,politics"
+"If we bear all this suffering and if there are still Jews left, when it is over, then Jews, instead of being doomed, will be held up as an example.","Anne Frank","holocaust,inspirational,judaism"
+"Teaching should be such that what is offered is perceived as a valuable gift and not as hard duty. Never regard study as duty but as the enviable opportunity to learn to know the liberating influence of beauty in the realm of the spirit for your own personal joy and to the profit of the community to which your later work belongs.","Albert Einstein","education"
+"We'll meet at the theater tonight. I'll hold your seat 'til you get there. Once you get there; you're on your own.","Groucho Marx","humor"
+"It is harder to crack prejudice than an atom.","Albert Einstein","humor,inspirational,science"
+"I can accept anything, except what seems to be the easiest for most people: the half-way, the almost, the just-about, the in-between.","Ayn Rand","compromise,philosophy"
+"A hospital bed is a parked taxi with the meter running.","Groucho Marx","funny"
+"The best and most beautiful things in the world cannot be seen or even touched - they must be felt with the heart.","Helen Keller","best,beautiful,heart"
+"No amount of experimentation can ever prove me right; a single experiment can prove me wrong.","Albert Einstein","science"
+"I don't try to imagine a personal God; it suffices to stand in awe at the structure of the world, insofar as it allows our inadequate senses to appreciate it.","Albert Einstein","atheism,atheist,god,religion"
+"Happiness is a state of activity.","Aristotle","activity,happiness"
+"Although I am a typical loner in my daily life, my awareness of belonging to the invisible community of those who strive for truth, beauty, and justice has prevented me from feelings of isolation.","Albert Einstein","albert-einstein,einstein,inspirational,loner"
+"We may have all come on different ships, but we're in the same boat now.","Martin Luther King Jr.","cultural,inspirational,spiritual"
+"I believe that God is in me as the sun is in the colour and fragrance of a flower - the Light in my darkness, the Voice in my silence.","Helen Keller","inspirational"
+"You see, wire telegraph is a kind of a very, very long cat. You pull his tail in New York and his head is meowing in Los Angeles. Do you understand this? And radio operates exactly the same way: you send signals here, they receive them there. The only difference is that there is no cat.","Albert Einstein","analogy,humor,science"
+"Familiarity breeds contempt and children.","Mark Twain","humor"
+"Learning is an ornament in prosperity, a refuge in adversity, and a provision in old age.","Aristotle","adversity,education,learning,old-age,prosperity"
+"A lot of people have gone further than they thought they could because someone else thought they could","Zig Ziglar","inspirational"
+"Life is a succesion of lessons which must be lived to be understood.","Helen Keller","lessons,life"
+"There are no traffic jams on the extra mile.","Zig Ziglar","inspirational"
+"You're gonna have to serve somebody; well, it may be the devil, or it may be the Lord, but you're gonna have to serve somebody...","Bob Dylan","christianity,faith,god"
+"Small is the number of them that see with their own eyes and feel with their own hearts.","Albert Einstein","heart,inspirational"
+"Act the way you'd like to be and soon you'll be the way you'd like to act.","Bob Dylan","action,change,improvement,misattributed-to-leonard-cohen,motivational,success"
+"It may be true that the law cannot make a man love me, but it can stop him from lynching me, and I think that's pretty important.","Martin Luther King Jr.","law,life,lynchings"
+"Happiness depends more on the inward disposition of mind than on outward circumstances.","Benjamin Franklin","inspirational"
+"A big leather-bound volume makes an ideal razorstrap. A thing book is useful to stick under a table with a broken caster to steady it. A large, flat atlas can be used to cover a window with a broken pane. And a thick, old-fashioned heavy book with a clasp is the finest thing in the world to throw at a noisy cat.","Mark Twain","books,humor"
+"No man's life, liberty, or property are safe while the legislature is in session.","Mark Twain","government,humor,politics"
+"Don't say the old lady screamed. Bring her on and let her scream.","Mark Twain","on-writing,writing"
+"Until justice rolls down like water and righteousness like a mighty stream.","Martin Luther King Jr.","america,freedom,human-rights,inspirational,justice,peace,pride,righteousness,stream,water"
+"Four things to learn in life: To think clearly without hurry or confusion; To love everybody sincerely; To act in everything with the highest motives; To trust God unhesitatingly.","Helen Keller","inspirational"
+"Strange is our situation here on Earth. Each of us comes for a short visit, not knowing why, yet sometimes seeming to divine a purpose. From the standpoint of daily life, however, there is one thing we do know: that man is here for the sake of other men - above all for those upon whose smiles and well-being our own happiness depends.","Albert Einstein","inspirational-quotes"
+"Everything that is really great and inspiring is created by the individual who can labor in freedom.","Albert Einstein","mind"
+"Courage is the price that life exacts for granting peace.","Amelia Earhart","bravery,courage,life,peace"
+"If a man could have half of his wishes, he would double his troubles.","Benjamin Franklin","wisdom"
+"In religion and politics people’s beliefs and convictions are in almost every case gotten at second-hand, and without examination, from authorities who have not themselves examined the questions at issue but have taken them at second-hand from other non-examiners, whose opinions about them were not worth a brass farthing.","Mark Twain","beliefs,politics,religion"
+"What I’ve realized is that life doesn’t count for much unless you’re willing to do your small part to leave our children — all of our children — a better world. Any fool can have a child. That doesn’t make you a father. It’s the courage to raise a child that makes you a father.","Barack Obama","inspirational"
+"A friend to all is a friend to none.","Aristotle","friend,none"
+"It usually takes me two or three days to prepare an impromptu speech.","Mark Twain","humor,preparedness"
+"I define nothing. Not beauty, not patriotism. I take each thing as it is, without prior rules about what it should be.","Bob Dylan","acceptance,bob,define,dylan,life"
+"It does not matter how long you live, but how well you do it.","Martin Luther King Jr.","inspirational"
+"It occurred to me by intuition, and music was the driving force behind that intuition. My discovery was the result of musical perception.","Albert Einstein","inspirational"
+"You look at yourself and you accept yourself for who you are, and once you accept yourself for who you are you become a better person.","Oprah Winfrey","inspirational"
+"Lots of people want to ride with you in the limo, but what you want is someone who will take the bus with you when the limo breaks down.","Oprah Winfrey","ride,bus,breaks"
+"Who is wise? He that learns from everyone. Who is powerful? He that governs his passions. Who is rich? He that is content. Who is that? Nobody.","Benjamin Franklin","philosophy,wisdom"
+"Paying alimony is like feeding hay to a dead horse.","Groucho Marx","divorce,funny"
+"Occasionally in life there are those moments of unutterable fulfillment which cannot be completely explained by those symbols called words. Their meanings can only be articulated by the inaudible language of the heart.","Martin Luther King Jr.","ecstasy,euphoria,fulfillment,life,power-of-words"
+"Learning without thought is labor lost; thought without learning is perilous.","Confucius","education,learning,thinking"
+"I believe that Gandhi’s views were the most enlightened of all the political men in our time. We should strive to do things in his spirit: not to use violence in fighting for our cause, but by non-participation in anything you believe is evil.","Albert Einstein","causes,corporations,gandhi,truth,violence"
+"The game of basketball has been everything to me. My place of refuge, place I've always gone where I needed comfort and peace. It's been the site of intense pain and the most intense feelings of joy and satisfaction. It's a relationship that has evolved over time, given me the greatest respect and love for the game.","Michael Jordan","sports,time,respect"
+"I hate girls that giggle all the time... You hate any girl that David looks at.","Audrey Hepburn","humor"
+"I believe in pink. I believe that laughing is the best calorie burner. I believe in kissing, kissing a lot. I believe in being strong when everything seems to be going wrong. I believe that happy girls are the prettiest girls. I believe that tomorrow is another day and I believe in miracles.","Audrey Hepburn","best,kissing,happy"
+"You say I started out with practically nothing, but that isn't correct. We all start with all there is, it's how we use it that makes things possible.","Henry Ford","inspiration"
+"Anyone who doesn't take truth seriously in small matters cannot be trusted in large ones either.","Albert Einstein","albert-einstein,truth"
+"I wasn't saying whatever they're saying I was saying. I'm sorry I said it really. I never meant it to be a lousy anti-religious thing. I apologize if that will make you happy. I still don't know quite what I've done. I've tried to tell you what I did do but if you want me to apologize, if that will make you happy, then OK, I'm sorry.","John Lennon","apology,beatles,humor,humour,jesus,religion"
+"Success is the doing, not the getting; in the trying, not the triumph. Success is a personal standard, reaching for the highest that is in us, becoming all that we can be. If we do our best, we are a success.","Zig Ziglar","doing,success"
+"I have a great respect for incremental improvement, and I've done that sort of thing in my life, but I've always been attracted to the more revolutionary changes. I don't know why. Because they're harder. They're much more stressful emotionally. And you usually go through a period where everybody tells you that you've completely failed.","Steve Jobs","respect,great,changes"
+"A bank is a place where they lend you an umbrella in fair weather and ask for it back when it begins to rain.","Robert Frost","humor,poetry"
+"A society's competitive advantage will come not from how well its schools teach the multiplication and periodic tables, but from how well they stimulate imagination and creativity.","Albert Einstein","science"
+"There are only a few notes. Just variations on a theme.","John Lennon","life,music"
+"A man is accepted into a church for what he believes and he is turned out for what he knows.","Mark Twain","religion"
+"It is this belief in a power larger than myself and other than myself which allows me to venture into the unknown and even the unknowable.","Maya Angelou","faith,inspirational"
+"A man is never more truthful than when he acknowledges himself a liar.","Mark Twain","truth"
+"What a wee little part of a person's life are his acts and his words! His real life is led in his head, and is known to none but himself.","Mark Twain","words,head,himself"
+"Your success and happiness lie in you.","Helen Keller","happiness,inspirational-quotes,success"
+"Ah, when to the heart of man Was it ever less than a treason To go with the drift of things, To yield with a grace to reason, And bow and accept the end Of a love or a season?","Robert Frost","love,poetry"
+"Never allow the fear of striking out keep you from playing the game!","Babe Ruth","babe-ruth,baseball,cinderella-story,inspiration,motivation,quote,success"
+"The whole secret of a successful life is to find out what is one's destiny to do, and then do it.","Henry Ford","success"
+"The future doesn't belong to the light-hearted. It belongs to the brave.","Ronald Reagan","bravery,inspirational,reagan"
+"An ounce of prevention is worth a pound of cure.","Benjamin Franklin","preparation,prevention,proverb,wisdom"
+"Wishing to be friends is quick work, but friendship is a slow ripening fruit.","Aristotle","work,fruit,slow"
+"One of the strongest motives that lead men to art and science is escape from everyday life with its painful crudity and hopeless dreariness, from the fetters of one's own ever-shifting desires. A finely tempered nature longs to escape from the personal life into the world of objective perception and thought.","Albert Einstein","art,life,science"
+"Making a decision to write was a lot like deciding to jump into a frozen lake.","Maya Angelou","writing"
+"America is too great for small dreams.","Ronald Reagan","inspirational"
+"What I try to do is write. I may write for two weeks ‘the cat sat on the mat, that is that, not a rat,’.... And it might be just the most boring and awful stuff. But I try. When I’m writing, I write. And then it’s as if the muse is convinced that I’m serious and says, ‘Okay. Okay. I’ll come.","Maya Angelou","writing"
+"And the only way to do great work is to love what you do.... Don't settle","Steve Jobs","inspirational"
+"The Master said, “A true teacher is one who, keeping the past alive, is also able to understand the present.”(Analects 2.11)","Confucius","history,teachers,wisdom"
+"I do not want the peace which passeth understanding, I want the understanding which bringeth peace.","Helen Keller","education,ispirational,learning,understanding"
+"If books are not good company, where shall I find it?","Mark Twain","books"
+"Remembering that you are going to die is the best way I know to avoid the trap of thinking you have something to lose.","Steve Jobs","death,hope,inspirational,life,motivatonal"
+"I failed to communicate, that's why I chose to leave","Bob Dylan","life"
+"What affects one in a major way, affects all in a minor way.","Martin Luther King Jr.","inspirational"
+"Poetry is a way of taking life by the throat.","Robert Frost","life,poetry"
+"…Christianity will go. It will vanish and shrink. I don't know what will go first, rock 'n' roll or Christianity. We're more popular than Jesus now. Jesus was all right, but his disciples were thick and ordinary. It's them twisting it that ruins it for me.","John Lennon","beatles,christianity,popular-culture,religion"
+"Great spirits have always encountered opposition from mediocre minds. The mediocre mind is incapable of understanding the man who refuses to bow blindly to conventional prejudices and chooses instead to express his opinions courageously and honestly.","Albert Einstein","inspirational,opposition,wisdom"
+"You better start swimming, or you'll sink like a stone. Because the Time's they are a-changing.","Bob Dylan","life,music"
+"Mr. Right is coming, but he's in Africa and he's walking.","Oprah Winfrey","funny"
+"That's what you want to do? Then nothing beats a trial but a failure. Give it everything you've got. I've told you many times, 'Cant do is like Dont Care.' Neither of them have a home.","Maya Angelou","inspirational"
+"We are faced with the fact, my friends, that tomorrow is today. Procrastination is still the thief of time. Over the bleached bones and jumbled residues of numerous civilizations are written the pathetic words ‘Too Late’.","Martin Luther King Jr.","inspirational,political"
+"I want all my senses engaged. Let me absorb the world's variety and uniqueness.","Maya Angelou","inspirational"
+"Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.","Mark Twain","education"
+"It is more shameful to distrust our friends than to be deceived by them.","Confucius","deceived,distrust,shameful"
+"Don't be distracted by criticism. Remember ~ the only taste of success some people have is when they take a bite out of you.","Zig Ziglar","believe,inspiration,motivational,zig-ziglar"
+"Freedom means the supremacy of human rights everywhere. Our support goes to those who struggle to gain those rights and keep them. Our strength is our unity of purpose. To that high concept there can be no end save victory.","Franklin D. Roosevelt","human-rights,truth"
+"Poetry is finer and more philosophical than history; for poetry expresses the universal, and history only the particular.","Aristotle","poetry"
+"No lake so still but it has its wave.No circle so perfect but that it has its blur.I would change things for you if I could; As I can't you must take them as they are.","Confucius","inspirational,life,philosophy"
+"Like anybody, I would like to have a long life. Longevity has its place. But I'm not concerned about that now. I just want to do God's will.","Martin Luther King Jr.","foreboding,life"
+"Rich People plan for three generationsPoor people plan for Saturday night","Gloria Steinem","class-distinction,inspirational"
+"Never contract friendship with a man that is not better than thyself.","Confucius","contract,thyself"
+"Each morning when I open my eyes I say to myself: I, not events, have the power to make me happy or unhappy today.","Groucho Marx","happiness,inspirational"
+"The significant problems we have cannot be solved at the same level of thinking with which we created them.","Albert Einstein","inspirational,problems,science"
+"I am overwhelmed by the grace and persistence of my people.","Maya Angelou","family,inspirational"
+"To plant a garden is to believe in tomorrow.","Audrey Hepburn","hope,inspirational"
+"youth is easily deceived because it is quick to hope.","Aristotle","deceit,gullibility,hope,wisdom,youth"
+"As our circle of knowledge expands, so does the circumference of darkness surrounding it.","Albert Einstein","knowledge"
+"It doesn't matter who you are, where you come from. The ability to triumph begins with you - always.","Oprah Winfrey","inspirational"
+"Knowledge is merely brilliance in organization of ideas and not wisdom. The truly wise person goes beyond knowledge.","Confucius","wisdom"
+"In the view of such harmony in the cosmos which I, with my limited human mind, am able to recognize, there are yet people who says there is no God. But what makes me really angry is that they quote me for support of such views. (The Expanded Quotable Einstein, Princeton University, page 214)","Albert Einstein","creation,god"
+"The Truth is found when men (and Women) are free to pursue it.","Franklin D. Roosevelt","freedom,liberty,truth"
+"Don't be trapped by dogma — which is living with the results of other people's thinking.","Steve Jobs","life,thinking"
+"When You Cease To Exist, Then Who Will You Blame?","Bob Dylan","inspirational"
+"A man's ethical behaviour should be based effectually on sympathy, education, and social ties and needs; no religious basis is necessary. Man would indeed be in a poor way if he had to be restrained by fear of punishment and hope of reward after death.","Albert Einstein","education,ethics,morality,needs,religion,sociality,sympathy"
+"Segregation shaped me; education liberated me.","Maya Angelou","education,inspirational,learning,race"
+"The gods too are fond of a joke.","Aristotle","god,humor,jokes,religion"
+"A war of ideas can no more be won without books than a naval war can be won without ships. Books, like ships, have the toughest armor, the longest cruising range, and mount the most powerful guns.","Franklin D. Roosevelt","boats,books,ideas,sailing,ships"
+"Take one cup of love, two cups of loyalty, three cups of forgiveness, four quarts of faith and one barrel of laughter. Take love and loyalty and mix them thoroughly with faith; blend with tenderness, kindness and understanding. Add friendship and hope. Sprinkle abundantly with laughter. Bake it with sunshine. Wrap it regularly with lots of hugs. Serve generous helpings daily.","Zig Ziglar","happiness,marriage"
+"If I could give you information of my life it would be to show how a woman of very ordinary ability has been led by God in strange and unaccustomed paths to do in His service what He has done in her. And if I could tell you all, you would see how God has done all, and I nothing. I have worked hard, very hard, that is all; and I have never refused God anything.","Florence Nightingale","god,service,work"
+"Life is about becoming more of who you really are....","Oprah Winfrey","inspirational"
+"If God would have wanted us to live in a permissive society He would have given us Ten Suggestions and not Ten Commandments.","Zig Ziglar","religion"
+"Go as far as you can see and you will see further.","Zig Ziglar","inspirational,motivation,success"
+"Without continual growth and progress, such words as improvement, achievement, and success have no meaning.","Benjamin Franklin","growth,progress,words"
+"To avoid being mistaken for a sellout, I chose my friends carefully. The more politically active black students. The foreign students. The Chicanos. The Marxist professors and structural feminists and punk-rock performance poets.","Barack Obama","black,students,active"
+"Anyone who fights for the future, lives in it today.","Ayn Rand","idealism,philosophy"
+"My philosophy, in essence, is the concept of man as a heroic being, with his own happiness as the moral purpose of his life, with productive achievement as his noblest activity, and reason as his only absolute.","Ayn Rand","philosophy"
+"A tyrant must put on the appearance of uncommon devotion to religion. Subjects are less apprehensive of illegal treatment from a ruler whom they consider god-fearing and pious. On the other hand, they do less easily move against him, believing that he has the gods on his side.","Aristotle","morality,politics,religion"
+"Curiosity is more important than knowledge.","Albert Einstein","curiosity,knowledge"
+"I said to my children, 'I'm going to work and do everything that I can do to see that you get a good education. I don't ever want you to forget that there are millions of God's children who will not and cannot get a good education, and I don't want you feeling that you are better than they are. For you will never be what you ought to be until they are what they ought to be.","Martin Luther King Jr.","altruism,education,potential,religion"
+"God made a beauteous garden With lovely flowers strown,But one straight, narrow pathway That was not overgrown.And to this beauteous garden He brought mankind to live,And said "To you, my children, These lovely flowers I give.Prune ye my vines and fig trees, With care my flowers tend,But keep the pathway open Your home is at the end."God's Garden","Robert Frost","frost,gardens,god"
+"Herodotus says, "Very few things happen at the right time, and the rest do not happen at all: the conscientious historian will correct these defects.","Mark Twain","creativity,historians,history,truth"
+"You will find the key to success under the alarm clock.","Benjamin Franklin","success"
+"Character cannot be developed in ease and quiet. Only through experience of trial and suffering can the soul be strengthened, ambition inspired, and success achieved.","Helen Keller","strength,experience"
+"The happiness you feel is in direct proportion to the love you give.","Oprah Winfrey","happiness,life,love"
+"If one tries to navigate unknown waters one runs the risk of shipwreck","Albert Einstein","wisdom"
+"You teach people how to treat you.","Oprah Winfrey","inspirational,oprah"
+"Nothing exists but you. And you are but a thought.","Mark Twain","atheism,inspirational"
+"It's sad if people think that's (homemaking) a dull existance, [but] you can't just buy an apartment and furnish it and walk away. It's the flowers you choose, the music you play, the smile you have waiting. I want it to be gay and cheerful, a haven in this troubled world. I don't want my husband and children to come home and find a rattled woman. Our era is already rattled enough, isn't it?","Audrey Hepburn","homemaking,inspirational,motherhood"
+"They say that patriotism is the last refuge to which a scoundrel clings steal a little and they throw you in jail. Steal a lot and then they make you king.","Bob Dylan","patriotism,scoundrels,truth,wisdom"
+"The test of any good fiction is that you should care something for the characters; the good to succeed, the bad to fail. The trouble with most fiction is that you want them all to land in hell together, as quickly as possible.","Mark Twain","writing"
+"Wit is the sudden marriage of ideas which before their union were not perceived to have any relation.","Mark Twain","cleverness,writing"
+"Learn as if you were not reaching your goal and as though you were scared of missing it","Confucius","confucious,inspirational,learning,life"
+"Walking with a friend in the dark is better than walking alone in the light.","Helen Keller","alone,dark,light"
+"For dear me, why abandon a beliefMerely because it ceases to be true","Robert Frost","faith,truth"
+"The way to wealth is as plain as the way to market. It depends chiefly on two words, industry and frugality: that is, waste neither time nor money, but make the best use of both. Without industry and frugality nothing will do, and with them everything.","Benjamin Franklin","advice-for-daily-living,enterprise,frugality,success"
+"Without feelings of respect, what is there to distinguish men from beasts?","Confucius","philosophical,understanding,wisdom"
+"One should never use exclamation points in writing. It is like laughing at your own joke.","Mark Twain","advice,clever,exclamation-point,writing"
+"Combinatory play seems to be the essential feature in productive thought.","Albert Einstein","education,learning,play"
+"Every form of happiness if one, every desire is driven by the same motor--by our love for a single value, for the highest potentiality of our own existence--and every achievement is an expression of it.","Ayn Rand","galt,objectivism,philosophy,taggart"
+"Economic power is exercised by means of a positive, by offering men a reward, an incentive, a payment, a value; political power is exercised by means of a negative, by the threat of punishment, injury, imprisonment, destruction. The businessman's tool is values; the bureaucrat's tool is fear.","Ayn Rand","businessmen,capitalism,force,government,money,philosophy,profit"
+"That which is impenetrable to us really exists. Behind the secrets of nature remains something subtle, intangible, and inexplicable. Veneration for this force beyond anything that we can comprehend is my religion.","Albert Einstein","religion"
+"he who will not economize will have to agonize","Confucius","frugality,ignorance,knowledge,suffering,wisdom"
+"Art is not the application of a canon of beauty but what the instinct and the brain can conceive beyond any canon. When we love a woman we don't start measuring her limbs.","Pablo Picasso","love,beauty,woman"
+"The best way to see Faith is to shut the eye of Reason.","Benjamin Franklin","wisdom"
+"To do more for the world than the world does for you - that is success.","Henry Ford","success"
+"It makes no difference where you go, there you are. And it makes no difference what you have, there’s always more to want. Until you are happy with who you are, you will never be happy because of what you have.","Zig Ziglar","grateful,happiness"
+"The right to search for the truth implies also a duty; one must not conceal any part of what one has recognized to be the truth.","Albert Einstein","truth"
+"When you have faults, do not fear to abandon them.","Confucius","advice,life,success"
+"I find relief from the questions only when I concede that I am not obliged to know everything. I remind myself it is sufficient to know what I know, and that what I know, may not always be true.","Maya Angelou","death,letter-to-my-daughter"
+"I confess to wincing every so often at a poorly chosen word, a mangled sentence, an expression of emotion that seems indulgent or overly practiced. I have the urge to cut the book by fifty pages or so, possessed as I am with a keener appreciation for brevity.","Barack Obama","writing"
+"He who speaks without modesty will find it difficult to make his words good.","Confucius","inspirational-quotes"
+"An attempt to achieve the good by force is like an attempt to provide a man with a picture gallery at the price of cutting out his eyes.","Ayn Rand","force,good,philosophy"
+"Listen. Pay attention. Treasure every moment.","Oprah Winfrey","wisdom"
+"It is a very grave mistake to think that the enjoyment of seeing and searching can be promoted by means of coercion and a sense of duty. To the contrary, I believe it would be possible to rob even a healthy beast of prey of its voraciousness, if it were possible, with the aid of a whip, to force the beast to devour continuously, even when not hungry.","Albert Einstein","education,learning"
+"Deux choses sont infinies : l’Univers et la bêtise humaine. Mais, en ce qui concerne l’Univers, je n’en ai pas encore acquis la certitude absolue.","Albert Einstein","bêtise-humaine,einstein,french,humour,infini,philosophie,science,universe"
+"La vie est une aventure audacieuse ou alors elle n'est rien.","Helen Keller","aventure,inspiration,vie"
+"All pain is the same.","Oprah Winfrey","pain,wisdom"
+"I couldn't tell fact from fiction,Or if the dream was trueMy only sure predictionIn this world was you.I'd touch your features inchly. Beard love and dared the cost, The sented spiel reeled me unreal And I found my senses lost.","Maya Angelou","inspiration,poetry"
+"It is this mythical, or rather symbolic, content of the religious traditions which is likely to come into conflict with science. This occurs whenever this religious stock of ideas contains dogmatically fixed statements on subjects which belong in the domain of science.","Albert Einstein","religion,science"
+"Try and penetrate with our limited means the secrets of nature and you will find that, behind all the descernible laws and connections, there remains something subtle, intangible and inexplicable. Veneration for this force beyond anything that we can comprehend is my religion. To that extent I am, in fact, religious.","Albert Einstein","science,spirituality"
+"We have not the reverent feeling for the rainbow that the savage has, because we know how it is made. We have lost as much as we gained by prying into that matter.","Mark Twain","awe,science"
+"If you're silent for a long time, people just arrive in your mind.","Alice Walker","characters,mind,people,silence,silent,writers,writing"
+"I speak to the Black experience, but I am always talking about the human condition--about what we can endure, dream, fail at and survive.","Maya Angelou","writing"
+"My best friend is the one who brings out the best in me.","Henry Ford","best,friend,brings"
+"An inventor is a man who asks 'Why?' of the universe and lets nothing stand between the answer and his mind.","Ayn Rand","philosophy"
+"But yield who will to their separation, My object in living is to uniteMy avocation and my vocationAs my two eyes make one in sight.","Robert Frost","goals,inspiration"
+"A moment or an eternity—did it matter? Life, undefeated, existed and could exist.","Ayn Rand","life,philosophy"
+"Faith is taking the first step even when you don't see the whole staircase.","Martin Luther King Jr.","faith"
+"Oh juventud nunca dejes de pensar...","Albert Einstein","science"
+"To study and constantly, is this not a pleasure? To have friends come from far away places, is this not a joy? If people do not recognize your worth, but this does not worry you, are you not a true gentleman?","Confucius","good-things,inspiration,life"
+"In any compromise between food and poison, it is only death that can win. In any compromise between good and evil, it is only evil that can profit.","Ayn Rand","evil,good,philosophy"
+"One more thing...","Steve Jobs","inspiration"
+"Man is here for the sake of other men - above all for those upon whose smiles and well-being our own happiness depends.","Albert Einstein","happiness,purpose"
+"And now I see the face of god, and I raise this god over the earth, this god whom men have sought since men came into being, this god who will grant them joy and peace and pride. This god, this one word: 'I.","Ayn Rand","individualism,philosophy"
+"Definitions are the guardians of rationality, the first line of defense against the chaos of mental disintegration.","Ayn Rand","epistemology,philosophy,reason"
+"The spread of evil is the symptom of a vacuum. Whenever evil wins, it is only by default: by the moral failure of those who evade the fact that there can be no compromise on basic principles.","Ayn Rand","evil,philosophy"
+"A culture is made — or destroyed — by its articulate voices.","Ayn Rand","philosophy"
+"The doorstep to the temple of wisdom is a knowledge of our own ignorance.","Benjamin Franklin","knowledge"
+"First of all, let me assert my firm belief that the only thing we have to fear is fear itself - nameless, unreasoning, unjustified terror which paralyzes needed efforts to convert retreat into advance.","Franklin D. Roosevelt","fear,inspiration"
+"Aristotle may be regarded as the cultural barometer of Western history. Whenever his influence dominated the scene, it paved the way for one of history's brilliant eras; whenever it fell, so did mankind.","Ayn Rand","aristotle,philosophy,reason"
+"It is the kindness to take in a stranger when the levees break; the selflessness of workers who would rather cut their hours than see a friend lose their job which sees us through our darkest hours. It is the firefighter's courage to storm a stairway filled with smoke, but also a parent's willingness to nurture a child, that finally decides our fate.","Barack Obama","america,inauguration,inspiration"
+"One today is worth two tomorrows.","Benjamin Franklin","today,worth,tomorrows"
+"Your success and happiness lies in you. Resolve to keep happy, and your joy and you shall form an invicible host against difficulties.","Helen Keller","happiness,helen-keller,inspirational-life,life,success"
+"But I don't know how to fight. All I know how to do is stay alive.","Alice Walker","faith,life,sad,western"
+"I have an unshaken conviction that democracy can never be undermined if we maintain our library resources and a national intelligence capable of utilizing them."[Letter to Herbert Putnam; in: Waters, Edward N.: Herbert Putnam: the tallest little man in the world; Quarterly Journal of the Library of Congress 33:2 (April 1976), p. 171]","Franklin D. Roosevelt","democracy,education,libraries"
+"Some people get an education without going to college. The rest get it after they get out.","Mark Twain","college,education"
+"Whatever their future, at the dawn of their lives, men seek a noble vision of man's nature and of life's potential.","Ayn Rand","idealism,philosophy"
+"Contrary to the ecologists, nature does not stand still and does not maintain the kind of equilibrium that guarantees the survival of any particular species - least of all the survival of her greatest and most fragile product: man.","Ayn Rand","environmentalism,man,philosophy"
+"Games lubricate the body and mind.","Benjamin Franklin","body,games,invigorate,lubricate,mind,refresh,renew,replenish"
+"In scientific thinking are always present elements of poetry. Science and music requires a thought homogeneous.","Albert Einstein","music,poetry,science"
+"I would not think that philosophy and reason themselves will be man's guide in the foreseeable future; however, they will remain the most beautiful sanctuary they have always been for the select few.","Albert Einstein","philosophy,thought"
+"Man is an end in himself.","Ayn Rand","individualism,life,objectivism,philosophy,success"
+"If a man has not discovered something that he will die for, he isn't fit to live","Martin Luther King Jr.","inspirational-quotes"
+"Fear is the Fatal killer of Desire.","Zig Ziglar","fear,inspiration,iuindia-com,marketing,motivational,rohitdaga"
+"Nor is there wanting in the pressSome spirit to stand simply forth,Heroic in it nakedness,Against the uttermost of earth.The tale of earth's unhonored thingsSounds nobler there than 'neath the sun;And the mind whirls and the heart sings,And a shout greets the daring one.","Robert Frost","life-quotes,poetry,poets,quotes,robert-frost"
+"Joy is the holy fire that keeps our purpose warm and our intelligence aglow.","Helen Keller","happiness,joy,purpose"
+"Try not to become a person of success, but rather try to become a person of value.","Albert Einstein","success"
+"People often say that motivation doesn't last. Well, neither does bathing - that's why we recommend it daily.","Zig Ziglar","daily,last,bathing"
+"Find a beautiful piece of art. If you fall in love with Van Gogh or Matisse or John Oliver Killens, or if you fall love with the music of Coltrane, the music of Aretha Franklin, or the music of Chopin - find some beautiful art and admire it, and realize that that was created by human beings just like you, no more human, no less.","Maya Angelou","love,music,beautiful"
+"The female is, as it were, a mutilated male, and the catamenia are semen, only not pure; for there is only one thing they have not in them, the principle of soul.","Aristotle","female,male,mutilated,soul"
+"When I lose the sense of motivation and the sense to prove something as a basketball player, it's time for me to move away from the game.","Michael Jordan","time,basketball,game"
+"The monotony and solitude of a quiet life stimulates the creative mind.","Albert Einstein","creative,solitude,quiet"
+"Wenn du weißt, behaupte, dass du es weißt. Und wenn du etwas nicht weißt, gib zu, dass du es nicht weißt. Das ist Wissen.","Confucius","knowledge,wisdom"
+"Let it be told to the future world that in the depth of winter, when nothing but hope and virtue could survive, that the city and the country, alarmed at one common danger, came forth to meet it.","Barack Obama","america,inauguration,inspiration"
+"We will restore science to its rightful place and wield technology's wonders to raise health care's quality and lower its cost.","Barack Obama","inspirational,politics,science"
+"Phantasie ist wichtiger als Wissen, denn Wissen ist begrenzt.","Albert Einstein","inspiration"
+"If that which we have found is the corruption of solitude, then what can men wish for save corruption? If this is the great evil of being alone, than what is good and what is evil?","Ayn Rand","dystopian,philosophy"
+"‎Teachers matter. So instead of bashing them, or defending the status quo, let’s offer schools a deal. Give them the resources to keep good teachers on the job, and reward the best ones. In return, grant schools flexibility: To teach with creativity and passion; to stop teaching to the test; and to replace teachers who just aren’t helping kids learn.","Barack Obama","education"
+"If you accept the expectations of others, especially negative ones, then you never will change the outcome.","Michael Jordan","change,negative,accept"
diff -pruN 8.17.2-2/examples/quotes/backend/quotes.py 9.2.0-1/examples/quotes/backend/quotes.py
--- 8.17.2-2/examples/quotes/backend/quotes.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/backend/quotes.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,179 @@
+import asyncio
+import csv
+import os
+from time import time
+from typing import Annotated
+
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel, Field, ValidationError
+from sentence_transformers import SentenceTransformer
+
+from elasticsearch import NotFoundError
+from elasticsearch.dsl.pydantic import AsyncBaseESModel
+from elasticsearch import dsl
+
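+# Load the sentence-transformers embedding model and register the global async
+# Elasticsearch connection from the ELASTICSEARCH_URL environment variable.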
+model = SentenceTransformer("all-MiniLM-L6-v2")
+dsl.async_connections.create_connection(hosts=[os.environ['ELASTICSEARCH_URL']])
+
+
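+# Elasticsearch document model backing the "quotes" index: author and tags are
+# mapped as keywords for exact filtering, and embedding is a dense_vector that
+# is populated when a quote is ingested or updated.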
+class Quote(AsyncBaseESModel):
+    quote: str
+    author: Annotated[str, dsl.Keyword()]
+    tags: Annotated[list[str], dsl.Keyword()]
+    embedding: Annotated[list[float], dsl.DenseVector()] = Field(init=False, default=[])
+
+    class Index:
+        name = 'quotes'
+
+
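+# Plain Pydantic models describing the search request and response payloads.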
+class Tag(BaseModel):
+    tag: str
+    count: int
+
+
+class SearchRequest(BaseModel):
+    query: str
+    filters: list[str]
+    knn: bool
+    start: int
+
+
+class SearchResponse(BaseModel):
+    quotes: list[Quote]
+    tags: list[Tag]
+    start: int
+    total: int
+
+
+app = FastAPI(
+    title="Quotes API",
+    version="1.0.0",
+)
+
+
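+# CRUD endpoints: fetch, create, update, and delete a single quote by document id.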
+@app.get("/api/quotes/{id}")
+async def get_quote(id: str) -> Quote:
+    doc = None
+    try:
+        doc = await Quote._doc.get(id)
+    except NotFoundError:
+        pass
+    if not doc:
+        raise HTTPException(status_code=404, detail="Item not found")
+    return Quote.from_doc(doc)
+
+
+@app.post("/api/quotes", status_code=201)
+async def create_quote(req: Quote) -> Quote:
+    embed_quotes([req])
+    doc = req.to_doc()
+    doc.meta.id = ""
+    await doc.save(refresh=True)
+    return Quote.from_doc(doc)
+
+
+@app.put("/api/quotes/{id}")
+async def update_quote(id: str, quote: Quote) -> Quote:
+    doc = None
+    try:
+        doc = await Quote._doc.get(id)
+    except NotFoundError:
+        pass
+    if not doc:
+        raise HTTPException(status_code=404, detail="Item not found")
+    if quote.quote:
+        embed_quotes([quote])
+        doc.quote = quote.quote
+        doc.embedding = quote.embedding
+    if quote.author:
+        doc.author = quote.author
+    if quote.tags:
+        doc.tags = quote.tags
+    await doc.save(refresh=True)
+    return Quote.from_doc(doc)
+
+
+@app.delete("/api/quotes/{id}", status_code=204)
+async def delete_quote(id: str) -> None:
+    doc = None
+    try:
+        doc = await Quote._doc.get(id)
+    except NotFoundError:
+        pass
+    if not doc:
+        raise HTTPException(status_code=404, detail="Item not found")
+    await doc.delete(refresh=True)
+
+
+@app.post('/api/search')
+async def search_quotes(req: SearchRequest) -> SearchResponse:
+    s = Quote._doc.search()
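+    # Build the query: match-all when the query string is empty, kNN over the
+    # embedding field when semantic search is requested, otherwise a full-text
+    # match on the quote text.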
+    if req.query == '':
+        s = s.query(dsl.query.MatchAll())
+    elif req.knn:
+        query_vector = model.encode(req.query).tolist()
+        s = s.query(dsl.query.Knn(field=Quote._doc.embedding, query_vector=query_vector))
+    else:
+        s = s.query(dsl.query.Match(quote=req.query))
+    for tag in req.filters:
+        s = s.filter(dsl.query.Terms(tags=[tag]))
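+    # Aggregate the top 100 tags so the response can include per-tag counts.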
+    s.aggs.bucket('tags', dsl.aggs.Terms(field=Quote._doc.tags, size=100))
+
+    r = await s[req.start:req.start + 25].execute()
+    tags = [(tag.key, tag.doc_count) for tag in r.aggs.tags.buckets]
+    quotes = [Quote.from_doc(hit) for hit in r.hits]
+    total = r['hits'].total.value
+
+    return SearchResponse(
+        quotes=quotes,
+        tags=[Tag(tag=t[0], count=t[1]) for t in tags],
+        start=req.start,
+        total=total
+    )
+
+
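+# Encode a batch of quotes with the embedding model and attach the resulting
+# vectors to each quote in place.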
+def embed_quotes(quotes):
+    embeddings = model.encode([q.quote for q in quotes])
+    for q, e in zip(quotes, embeddings):
+        q.embedding = e.tolist()
+
+
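+# Recreate the index, then bulk-ingest quotes.csv, embedding rows in batches
+# of 512 before handing them to the bulk helper.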
+async def ingest_quotes():
+    if await Quote._doc._index.exists():
+        await Quote._doc._index.delete()
+    await Quote._doc.init()
+
+    def ingest_progress(count, start):
+        elapsed = time() - start
+        print(f'\rIngested {count} quotes. ({count / elapsed:.0f}/sec)', end='')
+
+    async def get_next_quote():
+        quotes: list[Quote] = []
+        with open('quotes.csv') as f:
+            reader = csv.DictReader(f)
+            count = 0
+            start = time()
+            for row in reader:
+                q = Quote(quote=row['quote'], author=row['author'],
+                          tags=row['tags'].split(','))
+                quotes.append(q)
+                if len(quotes) == 512:
+                    embed_quotes(quotes)
+                    for q in quotes:
+                        yield q.to_doc()
+                    count += len(quotes)
+                    ingest_progress(count, start)
+                    quotes = []
+            if len(quotes) > 0:
+                embed_quotes(quotes)
+                for q in quotes:
+                    yield q.to_doc()
+                count += len(quotes)
+                ingest_progress(count, start)
+
+    await Quote._doc.bulk(get_next_quote())
+    print("\nIngest complete.")
+
+
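+# Running this module directly performs the one-time ingest of quotes.csv.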
+if __name__ == "__main__":
+    asyncio.run(ingest_quotes())
diff -pruN 8.17.2-2/examples/quotes/backend/requirements.txt 9.2.0-1/examples/quotes/backend/requirements.txt
--- 8.17.2-2/examples/quotes/backend/requirements.txt	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/backend/requirements.txt	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,163 @@
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+#    pip-compile pyproject.toml
+#
+aiohappyeyeballs==2.6.1
+    # via aiohttp
+aiohttp==3.12.15
+    # via elasticsearch
+aiosignal==1.4.0
+    # via aiohttp
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.11.0
+    # via starlette
+attrs==25.3.0
+    # via aiohttp
+certifi==2025.8.3
+    # via
+    #   elastic-transport
+    #   requests
+charset-normalizer==3.4.3
+    # via requests
+click==8.3.0
+    # via uvicorn
+elastic-transport==8.17.1
+    # via elasticsearch
+elasticsearch[async]==8.19.1
+    # via quotes (pyproject.toml)
+fastapi==0.117.1
+    # via quotes (pyproject.toml)
+filelock==3.19.1
+    # via
+    #   huggingface-hub
+    #   torch
+    #   transformers
+frozenlist==1.7.0
+    # via
+    #   aiohttp
+    #   aiosignal
+fsspec==2025.9.0
+    # via
+    #   huggingface-hub
+    #   torch
+h11==0.16.0
+    # via uvicorn
+hf-xet==1.1.10
+    # via huggingface-hub
+huggingface-hub==0.35.0
+    # via
+    #   sentence-transformers
+    #   tokenizers
+    #   transformers
+idna==3.10
+    # via
+    #   anyio
+    #   requests
+    #   yarl
+jinja2==3.1.6
+    # via torch
+joblib==1.5.2
+    # via scikit-learn
+markupsafe==3.0.2
+    # via jinja2
+mpmath==1.3.0
+    # via sympy
+multidict==6.6.4
+    # via
+    #   aiohttp
+    #   yarl
+networkx==3.5
+    # via torch
+numpy==2.3.3
+    # via
+    #   scikit-learn
+    #   scipy
+    #   transformers
+orjson==3.11.3
+    # via quotes (pyproject.toml)
+packaging==25.0
+    # via
+    #   huggingface-hub
+    #   transformers
+pillow==11.3.0
+    # via sentence-transformers
+propcache==0.3.2
+    # via
+    #   aiohttp
+    #   yarl
+pydantic==2.11.9
+    # via fastapi
+pydantic-core==2.33.2
+    # via pydantic
+python-dateutil==2.9.0.post0
+    # via elasticsearch
+pyyaml==6.0.2
+    # via
+    #   huggingface-hub
+    #   transformers
+regex==2025.9.18
+    # via transformers
+requests==2.32.5
+    # via
+    #   huggingface-hub
+    #   transformers
+safetensors==0.6.2
+    # via transformers
+scikit-learn==1.7.2
+    # via sentence-transformers
+scipy==1.16.2
+    # via
+    #   scikit-learn
+    #   sentence-transformers
+sentence-transformers==5.1.1
+    # via quotes (pyproject.toml)
+six==1.17.0
+    # via python-dateutil
+sniffio==1.3.1
+    # via anyio
+starlette==0.48.0
+    # via fastapi
+sympy==1.14.0
+    # via torch
+threadpoolctl==3.6.0
+    # via scikit-learn
+tokenizers==0.22.1
+    # via transformers
+torch==2.8.0
+    # via sentence-transformers
+tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   sentence-transformers
+    #   transformers
+transformers==4.56.2
+    # via sentence-transformers
+typing-extensions==4.15.0
+    # via
+    #   aiosignal
+    #   anyio
+    #   elasticsearch
+    #   fastapi
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   sentence-transformers
+    #   starlette
+    #   torch
+    #   typing-inspection
+typing-inspection==0.4.1
+    # via pydantic
+urllib3==2.5.0
+    # via
+    #   elastic-transport
+    #   requests
+uvicorn==0.36.0
+    # via quotes (pyproject.toml)
+yarl==1.20.1
+    # via aiohttp
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff -pruN 8.17.2-2/examples/quotes/eslint.config.js 9.2.0-1/examples/quotes/eslint.config.js
--- 8.17.2-2/examples/quotes/eslint.config.js	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/eslint.config.js	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,23 @@
+import js from '@eslint/js'
+import globals from 'globals'
+import reactHooks from 'eslint-plugin-react-hooks'
+import reactRefresh from 'eslint-plugin-react-refresh'
+import tseslint from 'typescript-eslint'
+import { defineConfig, globalIgnores } from 'eslint/config'
+
+export default defineConfig([
+  globalIgnores(['dist']),
+  {
+    files: ['**/*.{ts,tsx}'],
+    extends: [
+      js.configs.recommended,
+      tseslint.configs.recommended,
+      reactHooks.configs['recommended-latest'],
+      reactRefresh.configs.vite,
+    ],
+    languageOptions: {
+      ecmaVersion: 2020,
+      globals: globals.browser,
+    },
+  },
+])
diff -pruN 8.17.2-2/examples/quotes/index.html 9.2.0-1/examples/quotes/index.html
--- 8.17.2-2/examples/quotes/index.html	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/index.html	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,13 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Elasticsearch + Pydantic Demo</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
diff -pruN 8.17.2-2/examples/quotes/package-lock.json 9.2.0-1/examples/quotes/package-lock.json
--- 8.17.2-2/examples/quotes/package-lock.json	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/package-lock.json	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,3756 @@
+{
+  "name": "quotes",
+  "version": "0.0.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "quotes",
+      "version": "0.0.0",
+      "hasInstallScript": true,
+      "dependencies": {
+        "boostrap": "^2.0.0",
+        "bootstrap": "^5.3.8",
+        "react": "^19.1.1",
+        "react-bootstrap": "^2.10.10",
+        "react-dom": "^19.1.1",
+        "react-router": "^7.9.2"
+      },
+      "devDependencies": {
+        "@eslint/js": "^9.36.0",
+        "@types/react": "^19.1.13",
+        "@types/react-dom": "^19.1.9",
+        "@vitejs/plugin-react": "^5.0.3",
+        "eslint": "^9.36.0",
+        "eslint-plugin-react-hooks": "^5.2.0",
+        "eslint-plugin-react-refresh": "^0.4.20",
+        "globals": "^16.4.0",
+        "typescript": "~5.8.3",
+        "typescript-eslint": "^8.44.0",
+        "vite": "^7.1.7"
+      }
+    },
+    "node_modules/@babel/code-frame": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+      "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/helper-validator-identifier": "^7.27.1",
+        "js-tokens": "^4.0.0",
+        "picocolors": "^1.1.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/compat-data": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.4.tgz",
+      "integrity": "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/core": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz",
+      "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/code-frame": "^7.27.1",
+        "@babel/generator": "^7.28.3",
+        "@babel/helper-compilation-targets": "^7.27.2",
+        "@babel/helper-module-transforms": "^7.28.3",
+        "@babel/helpers": "^7.28.4",
+        "@babel/parser": "^7.28.4",
+        "@babel/template": "^7.27.2",
+        "@babel/traverse": "^7.28.4",
+        "@babel/types": "^7.28.4",
+        "@jridgewell/remapping": "^2.3.5",
+        "convert-source-map": "^2.0.0",
+        "debug": "^4.1.0",
+        "gensync": "^1.0.0-beta.2",
+        "json5": "^2.2.3",
+        "semver": "^6.3.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/babel"
+      }
+    },
+    "node_modules/@babel/generator": {
+      "version": "7.28.3",
+      "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz",
+      "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/parser": "^7.28.3",
+        "@babel/types": "^7.28.2",
+        "@jridgewell/gen-mapping": "^0.3.12",
+        "@jridgewell/trace-mapping": "^0.3.28",
+        "jsesc": "^3.0.2"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-compilation-targets": {
+      "version": "7.27.2",
+      "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+      "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/compat-data": "^7.27.2",
+        "@babel/helper-validator-option": "^7.27.1",
+        "browserslist": "^4.24.0",
+        "lru-cache": "^5.1.1",
+        "semver": "^6.3.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-globals": {
+      "version": "7.28.0",
+      "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+      "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-module-imports": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+      "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/traverse": "^7.27.1",
+        "@babel/types": "^7.27.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-module-transforms": {
+      "version": "7.28.3",
+      "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+      "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/helper-module-imports": "^7.27.1",
+        "@babel/helper-validator-identifier": "^7.27.1",
+        "@babel/traverse": "^7.28.3"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      },
+      "peerDependencies": {
+        "@babel/core": "^7.0.0"
+      }
+    },
+    "node_modules/@babel/helper-plugin-utils": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+      "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-string-parser": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+      "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-validator-identifier": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz",
+      "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-validator-option": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+      "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helpers": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+      "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/template": "^7.27.2",
+        "@babel/types": "^7.28.4"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/parser": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.4.tgz",
+      "integrity": "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/types": "^7.28.4"
+      },
+      "bin": {
+        "parser": "bin/babel-parser.js"
+      },
+      "engines": {
+        "node": ">=6.0.0"
+      }
+    },
+    "node_modules/@babel/plugin-transform-react-jsx-self": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
+      "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/helper-plugin-utils": "^7.27.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      },
+      "peerDependencies": {
+        "@babel/core": "^7.0.0-0"
+      }
+    },
+    "node_modules/@babel/plugin-transform-react-jsx-source": {
+      "version": "7.27.1",
+      "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
+      "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/helper-plugin-utils": "^7.27.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      },
+      "peerDependencies": {
+        "@babel/core": "^7.0.0-0"
+      }
+    },
+    "node_modules/@babel/runtime": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
+      "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/template": {
+      "version": "7.27.2",
+      "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+      "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/code-frame": "^7.27.1",
+        "@babel/parser": "^7.27.2",
+        "@babel/types": "^7.27.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/traverse": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.4.tgz",
+      "integrity": "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/code-frame": "^7.27.1",
+        "@babel/generator": "^7.28.3",
+        "@babel/helper-globals": "^7.28.0",
+        "@babel/parser": "^7.28.4",
+        "@babel/template": "^7.27.2",
+        "@babel/types": "^7.28.4",
+        "debug": "^4.3.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/types": {
+      "version": "7.28.4",
+      "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.4.tgz",
+      "integrity": "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/helper-string-parser": "^7.27.1",
+        "@babel/helper-validator-identifier": "^7.27.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@esbuild/aix-ppc64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.10.tgz",
+      "integrity": "sha512-0NFWnA+7l41irNuaSVlLfgNT12caWJVLzp5eAVhZ0z1qpxbockccEt3s+149rE64VUI3Ml2zt8Nv5JVc4QXTsw==",
+      "cpu": [
+        "ppc64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "aix"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/android-arm": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.10.tgz",
+      "integrity": "sha512-dQAxF1dW1C3zpeCDc5KqIYuZ1tgAdRXNoZP7vkBIRtKZPYe2xVr/d3SkirklCHudW1B45tGiUlz2pUWDfbDD4w==",
+      "cpu": [
+        "arm"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/android-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.10.tgz",
+      "integrity": "sha512-LSQa7eDahypv/VO6WKohZGPSJDq5OVOo3UoFR1E4t4Gj1W7zEQMUhI+lo81H+DtB+kP+tDgBp+M4oNCwp6kffg==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/android-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.10.tgz",
+      "integrity": "sha512-MiC9CWdPrfhibcXwr39p9ha1x0lZJ9KaVfvzA0Wxwz9ETX4v5CHfF09bx935nHlhi+MxhA63dKRRQLiVgSUtEg==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/darwin-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.10.tgz",
+      "integrity": "sha512-JC74bdXcQEpW9KkV326WpZZjLguSZ3DfS8wrrvPMHgQOIEIG/sPXEN/V8IssoJhbefLRcRqw6RQH2NnpdprtMA==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/darwin-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.10.tgz",
+      "integrity": "sha512-tguWg1olF6DGqzws97pKZ8G2L7Ig1vjDmGTwcTuYHbuU6TTjJe5FXbgs5C1BBzHbJ2bo1m3WkQDbWO2PvamRcg==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/freebsd-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.10.tgz",
+      "integrity": "sha512-3ZioSQSg1HT2N05YxeJWYR+Libe3bREVSdWhEEgExWaDtyFbbXWb49QgPvFH8u03vUPX10JhJPcz7s9t9+boWg==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "freebsd"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/freebsd-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.10.tgz",
+      "integrity": "sha512-LLgJfHJk014Aa4anGDbh8bmI5Lk+QidDmGzuC2D+vP7mv/GeSN+H39zOf7pN5N8p059FcOfs2bVlrRr4SK9WxA==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "freebsd"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-arm": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.10.tgz",
+      "integrity": "sha512-oR31GtBTFYCqEBALI9r6WxoU/ZofZl962pouZRTEYECvNF/dtXKku8YXcJkhgK/beU+zedXfIzHijSRapJY3vg==",
+      "cpu": [
+        "arm"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.10.tgz",
+      "integrity": "sha512-5luJWN6YKBsawd5f9i4+c+geYiVEw20FVW5x0v1kEMWNq8UctFjDiMATBxLvmmHA4bf7F6hTRaJgtghFr9iziQ==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-ia32": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.10.tgz",
+      "integrity": "sha512-NrSCx2Kim3EnnWgS4Txn0QGt0Xipoumb6z6sUtl5bOEZIVKhzfyp/Lyw4C1DIYvzeW/5mWYPBFJU3a/8Yr75DQ==",
+      "cpu": [
+        "ia32"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-loong64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.10.tgz",
+      "integrity": "sha512-xoSphrd4AZda8+rUDDfD9J6FUMjrkTz8itpTITM4/xgerAZZcFW7Dv+sun7333IfKxGG8gAq+3NbfEMJfiY+Eg==",
+      "cpu": [
+        "loong64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-mips64el": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.10.tgz",
+      "integrity": "sha512-ab6eiuCwoMmYDyTnyptoKkVS3k8fy/1Uvq7Dj5czXI6DF2GqD2ToInBI0SHOp5/X1BdZ26RKc5+qjQNGRBelRA==",
+      "cpu": [
+        "mips64el"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-ppc64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.10.tgz",
+      "integrity": "sha512-NLinzzOgZQsGpsTkEbdJTCanwA5/wozN9dSgEl12haXJBzMTpssebuXR42bthOF3z7zXFWH1AmvWunUCkBE4EA==",
+      "cpu": [
+        "ppc64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-riscv64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.10.tgz",
+      "integrity": "sha512-FE557XdZDrtX8NMIeA8LBJX3dC2M8VGXwfrQWU7LB5SLOajfJIxmSdyL/gU1m64Zs9CBKvm4UAuBp5aJ8OgnrA==",
+      "cpu": [
+        "riscv64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-s390x": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.10.tgz",
+      "integrity": "sha512-3BBSbgzuB9ajLoVZk0mGu+EHlBwkusRmeNYdqmznmMc9zGASFjSsxgkNsqmXugpPk00gJ0JNKh/97nxmjctdew==",
+      "cpu": [
+        "s390x"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/linux-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.10.tgz",
+      "integrity": "sha512-QSX81KhFoZGwenVyPoberggdW1nrQZSvfVDAIUXr3WqLRZGZqWk/P4T8p2SP+de2Sr5HPcvjhcJzEiulKgnxtA==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/netbsd-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.10.tgz",
+      "integrity": "sha512-AKQM3gfYfSW8XRk8DdMCzaLUFB15dTrZfnX8WXQoOUpUBQ+NaAFCP1kPS/ykbbGYz7rxn0WS48/81l9hFl3u4A==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "netbsd"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/netbsd-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.10.tgz",
+      "integrity": "sha512-7RTytDPGU6fek/hWuN9qQpeGPBZFfB4zZgcz2VK2Z5VpdUxEI8JKYsg3JfO0n/Z1E/6l05n0unDCNc4HnhQGig==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "netbsd"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/openbsd-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.10.tgz",
+      "integrity": "sha512-5Se0VM9Wtq797YFn+dLimf2Zx6McttsH2olUBsDml+lm0GOCRVebRWUvDtkY4BWYv/3NgzS8b/UM3jQNh5hYyw==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "openbsd"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/openbsd-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.10.tgz",
+      "integrity": "sha512-XkA4frq1TLj4bEMB+2HnI0+4RnjbuGZfet2gs/LNs5Hc7D89ZQBHQ0gL2ND6Lzu1+QVkjp3x1gIcPKzRNP8bXw==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "openbsd"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/openharmony-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.10.tgz",
+      "integrity": "sha512-AVTSBhTX8Y/Fz6OmIVBip9tJzZEUcY8WLh7I59+upa5/GPhh2/aM6bvOMQySspnCCHvFi79kMtdJS1w0DXAeag==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "openharmony"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/sunos-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.10.tgz",
+      "integrity": "sha512-fswk3XT0Uf2pGJmOpDB7yknqhVkJQkAQOcW/ccVOtfx05LkbWOaRAtn5SaqXypeKQra1QaEa841PgrSL9ubSPQ==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "sunos"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/win32-arm64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.10.tgz",
+      "integrity": "sha512-ah+9b59KDTSfpaCg6VdJoOQvKjI33nTaQr4UluQwW7aEwZQsbMCfTmfEO4VyewOxx4RaDT/xCy9ra2GPWmO7Kw==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/win32-ia32": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.10.tgz",
+      "integrity": "sha512-QHPDbKkrGO8/cz9LKVnJU22HOi4pxZnZhhA2HYHez5Pz4JeffhDjf85E57Oyco163GnzNCVkZK0b/n4Y0UHcSw==",
+      "cpu": [
+        "ia32"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@esbuild/win32-x64": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.10.tgz",
+      "integrity": "sha512-9KpxSVFCu0iK1owoez6aC/s/EdUQLDN3adTxGCqxMVhrPDj6bt5dbrHDXUuq+Bs2vATFBBrQS5vdQ/Ed2P+nbw==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ],
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/@eslint-community/eslint-utils": {
+      "version": "4.9.0",
+      "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
+      "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "eslint-visitor-keys": "^3.4.3"
+      },
+      "engines": {
+        "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
+      }
+    },
+    "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": {
+      "version": "3.4.3",
+      "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+      "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      }
+    },
+    "node_modules/@eslint-community/regexpp": {
+      "version": "4.12.1",
+      "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz",
+      "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": "^12.0.0 || ^14.0.0 || >=16.0.0"
+      }
+    },
+    "node_modules/@eslint/config-array": {
+      "version": "0.21.0",
+      "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz",
+      "integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@eslint/object-schema": "^2.1.6",
+        "debug": "^4.3.1",
+        "minimatch": "^3.1.2"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      }
+    },
+    "node_modules/@eslint/config-helpers": {
+      "version": "0.3.1",
+      "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.1.tgz",
+      "integrity": "sha512-xR93k9WhrDYpXHORXpxVL5oHj3Era7wo6k/Wd8/IsQNnZUTzkGS29lyn3nAT05v6ltUuTFVCCYDEGfy2Or/sPA==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      }
+    },
+    "node_modules/@eslint/core": {
+      "version": "0.15.2",
+      "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.2.tgz",
+      "integrity": "sha512-78Md3/Rrxh83gCxoUc0EiciuOHsIITzLy53m3d9UyiW8y9Dj2D29FeETqyKA+BRK76tnTp6RXWb3pCay8Oyomg==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/json-schema": "^7.0.15"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      }
+    },
+    "node_modules/@eslint/eslintrc": {
+      "version": "3.3.1",
+      "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz",
+      "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "ajv": "^6.12.4",
+        "debug": "^4.3.2",
+        "espree": "^10.0.1",
+        "globals": "^14.0.0",
+        "ignore": "^5.2.0",
+        "import-fresh": "^3.2.1",
+        "js-yaml": "^4.1.0",
+        "minimatch": "^3.1.2",
+        "strip-json-comments": "^3.1.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      }
+    },
+    "node_modules/@eslint/eslintrc/node_modules/globals": {
+      "version": "14.0.0",
+      "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz",
+      "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/@eslint/js": {
+      "version": "9.36.0",
+      "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.36.0.tgz",
+      "integrity": "sha512-uhCbYtYynH30iZErszX78U+nR3pJU3RHGQ57NXy5QupD4SBVwDeU8TNBy+MjMngc1UyIW9noKqsRqfjQTBU2dw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "url": "https://eslint.org/donate"
+      }
+    },
+    "node_modules/@eslint/object-schema": {
+      "version": "2.1.6",
+      "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz",
+      "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      }
+    },
+    "node_modules/@eslint/plugin-kit": {
+      "version": "0.3.5",
+      "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.5.tgz",
+      "integrity": "sha512-Z5kJ+wU3oA7MMIqVR9tyZRtjYPr4OC004Q4Rw7pgOKUOKkJfZ3O24nz3WYfGRpMDNmcOi3TwQOmgm7B7Tpii0w==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@eslint/core": "^0.15.2",
+        "levn": "^0.4.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      }
+    },
+    "node_modules/@humanfs/core": {
+      "version": "0.19.1",
+      "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
+      "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=18.18.0"
+      }
+    },
+    "node_modules/@humanfs/node": {
+      "version": "0.16.7",
+      "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz",
+      "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@humanfs/core": "^0.19.1",
+        "@humanwhocodes/retry": "^0.4.0"
+      },
+      "engines": {
+        "node": ">=18.18.0"
+      }
+    },
+    "node_modules/@humanwhocodes/module-importer": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
+      "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=12.22"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/nzakas"
+      }
+    },
+    "node_modules/@humanwhocodes/retry": {
+      "version": "0.4.3",
+      "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz",
+      "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=18.18"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/nzakas"
+      }
+    },
+    "node_modules/@jridgewell/gen-mapping": {
+      "version": "0.3.13",
+      "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+      "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@jridgewell/sourcemap-codec": "^1.5.0",
+        "@jridgewell/trace-mapping": "^0.3.24"
+      }
+    },
+    "node_modules/@jridgewell/remapping": {
+      "version": "2.3.5",
+      "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+      "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@jridgewell/gen-mapping": "^0.3.5",
+        "@jridgewell/trace-mapping": "^0.3.24"
+      }
+    },
+    "node_modules/@jridgewell/resolve-uri": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+      "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.0.0"
+      }
+    },
+    "node_modules/@jridgewell/sourcemap-codec": {
+      "version": "1.5.5",
+      "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+      "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/@jridgewell/trace-mapping": {
+      "version": "0.3.31",
+      "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+      "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@jridgewell/resolve-uri": "^3.1.0",
+        "@jridgewell/sourcemap-codec": "^1.4.14"
+      }
+    },
+    "node_modules/@nodelib/fs.scandir": {
+      "version": "2.1.5",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+      "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@nodelib/fs.stat": "2.0.5",
+        "run-parallel": "^1.1.9"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@nodelib/fs.stat": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+      "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@nodelib/fs.walk": {
+      "version": "1.2.8",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+      "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@nodelib/fs.scandir": "2.1.5",
+        "fastq": "^1.6.0"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@popperjs/core": {
+      "version": "2.11.8",
+      "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz",
+      "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==",
+      "license": "MIT",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/popperjs"
+      }
+    },
+    "node_modules/@react-aria/ssr": {
+      "version": "3.9.10",
+      "resolved": "https://registry.npmjs.org/@react-aria/ssr/-/ssr-3.9.10.tgz",
+      "integrity": "sha512-hvTm77Pf+pMBhuBm760Li0BVIO38jv1IBws1xFm1NoL26PU+fe+FMW5+VZWyANR6nYL65joaJKZqOdTQMkO9IQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@swc/helpers": "^0.5.0"
+      },
+      "engines": {
+        "node": ">= 12"
+      },
+      "peerDependencies": {
+        "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
+      }
+    },
+    "node_modules/@restart/hooks": {
+      "version": "0.4.16",
+      "resolved": "https://registry.npmjs.org/@restart/hooks/-/hooks-0.4.16.tgz",
+      "integrity": "sha512-f7aCv7c+nU/3mF7NWLtVVr0Ra80RqsO89hO72r+Y/nvQr5+q0UFGkocElTH6MJApvReVh6JHUFYn2cw1WdHF3w==",
+      "license": "MIT",
+      "dependencies": {
+        "dequal": "^2.0.3"
+      },
+      "peerDependencies": {
+        "react": ">=16.8.0"
+      }
+    },
+    "node_modules/@restart/ui": {
+      "version": "1.9.4",
+      "resolved": "https://registry.npmjs.org/@restart/ui/-/ui-1.9.4.tgz",
+      "integrity": "sha512-N4C7haUc3vn4LTwVUPlkJN8Ach/+yIMvRuTVIhjilNHqegY60SGLrzud6errOMNJwSnmYFnt1J0H/k8FE3A4KA==",
+      "license": "MIT",
+      "dependencies": {
+        "@babel/runtime": "^7.26.0",
+        "@popperjs/core": "^2.11.8",
+        "@react-aria/ssr": "^3.5.0",
+        "@restart/hooks": "^0.5.0",
+        "@types/warning": "^3.0.3",
+        "dequal": "^2.0.3",
+        "dom-helpers": "^5.2.0",
+        "uncontrollable": "^8.0.4",
+        "warning": "^4.0.3"
+      },
+      "peerDependencies": {
+        "react": ">=16.14.0",
+        "react-dom": ">=16.14.0"
+      }
+    },
+    "node_modules/@restart/ui/node_modules/@restart/hooks": {
+      "version": "0.5.1",
+      "resolved": "https://registry.npmjs.org/@restart/hooks/-/hooks-0.5.1.tgz",
+      "integrity": "sha512-EMoH04NHS1pbn07iLTjIjgttuqb7qu4+/EyhAx27MHpoENcB2ZdSsLTNxmKD+WEPnZigo62Qc8zjGnNxoSE/5Q==",
+      "license": "MIT",
+      "dependencies": {
+        "dequal": "^2.0.3"
+      },
+      "peerDependencies": {
+        "react": ">=16.8.0"
+      }
+    },
+    "node_modules/@restart/ui/node_modules/uncontrollable": {
+      "version": "8.0.4",
+      "resolved": "https://registry.npmjs.org/uncontrollable/-/uncontrollable-8.0.4.tgz",
+      "integrity": "sha512-ulRWYWHvscPFc0QQXvyJjY6LIXU56f0h8pQFvhxiKk5V1fcI8gp9Ht9leVAhrVjzqMw0BgjspBINx9r6oyJUvQ==",
+      "license": "MIT",
+      "peerDependencies": {
+        "react": ">=16.14.0"
+      }
+    },
+    "node_modules/@rolldown/pluginutils": {
+      "version": "1.0.0-beta.35",
+      "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.35.tgz",
+      "integrity": "sha512-slYrCpoxJUqzFDDNlvrOYRazQUNRvWPjXA17dAOISY3rDMxX6k8K4cj2H+hEYMHF81HO3uNd5rHVigAWRM5dSg==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/@rollup/rollup-android-arm-eabi": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.2.tgz",
+      "integrity": "sha512-o3pcKzJgSGt4d74lSZ+OCnHwkKBeAbFDmbEm5gg70eA8VkyCuC/zV9TwBnmw6VjDlRdF4Pshfb+WE9E6XY1PoQ==",
+      "cpu": [
+        "arm"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ]
+    },
+    "node_modules/@rollup/rollup-android-arm64": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.2.tgz",
+      "integrity": "sha512-cqFSWO5tX2vhC9hJTK8WAiPIm4Q8q/cU8j2HQA0L3E1uXvBYbOZMhE2oFL8n2pKB5sOCHY6bBuHaRwG7TkfJyw==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "android"
+      ]
+    },
+    "node_modules/@rollup/rollup-darwin-arm64": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.2.tgz",
+      "integrity": "sha512-vngduywkkv8Fkh3wIZf5nFPXzWsNsVu1kvtLETWxTFf/5opZmflgVSeLgdHR56RQh71xhPhWoOkEBvbehwTlVA==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ]
+    },
+    "node_modules/@rollup/rollup-darwin-x64": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.2.tgz",
+      "integrity": "sha512-h11KikYrUCYTrDj6h939hhMNlqU2fo/X4NB0OZcys3fya49o1hmFaczAiJWVAFgrM1NCP6RrO7lQKeVYSKBPSQ==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ]
+    },
+    "node_modules/@rollup/rollup-freebsd-arm64": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.2.tgz",
+      "integrity": "sha512-/eg4CI61ZUkLXxMHyVlmlGrSQZ34xqWlZNW43IAU4RmdzWEx0mQJ2mN/Cx4IHLVZFL6UBGAh+/GXhgvGb+nVxw==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "freebsd"
+      ]
+    },
+    "node_modules/@rollup/rollup-freebsd-x64": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.2.tgz",
+      "integrity": "sha512-QOWgFH5X9+p+S1NAfOqc0z8qEpJIoUHf7OWjNUGOeW18Mx22lAUOiA9b6r2/vpzLdfxi/f+VWsYjUOMCcYh0Ng==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "freebsd"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.2.tgz",
+      "integrity": "sha512-kDWSPafToDd8LcBYd1t5jw7bD5Ojcu12S3uT372e5HKPzQt532vW+rGFFOaiR0opxePyUkHrwz8iWYEyH1IIQA==",
+      "cpu": [
+        "arm"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.2.tgz",
+      "integrity": "sha512-gKm7Mk9wCv6/rkzwCiUC4KnevYhlf8ztBrDRT9g/u//1fZLapSRc+eDZj2Eu2wpJ+0RzUKgtNijnVIB4ZxyL+w==",
+      "cpu": [
+        "arm"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-arm64-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.2.tgz",
+      "integrity": "sha512-66lA8vnj5mB/rtDNwPgrrKUOtCLVQypkyDa2gMfOefXK6rcZAxKLO9Fy3GkW8VkPnENv9hBkNOFfGLf6rNKGUg==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-arm64-musl": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.2.tgz",
+      "integrity": "sha512-s+OPucLNdJHvuZHuIz2WwncJ+SfWHFEmlC5nKMUgAelUeBUnlB4wt7rXWiyG4Zn07uY2Dd+SGyVa9oyLkVGOjA==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-loong64-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.2.tgz",
+      "integrity": "sha512-8wTRM3+gVMDLLDdaT6tKmOE3lJyRy9NpJUS/ZRWmLCmOPIJhVyXwjBo+XbrrwtV33Em1/eCTd5TuGJm4+DmYjw==",
+      "cpu": [
+        "loong64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.2.tgz",
+      "integrity": "sha512-6yqEfgJ1anIeuP2P/zhtfBlDpXUb80t8DpbYwXQ3bQd95JMvUaqiX+fKqYqUwZXqdJDd8xdilNtsHM2N0cFm6A==",
+      "cpu": [
+        "ppc64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.2.tgz",
+      "integrity": "sha512-sshYUiYVSEI2B6dp4jMncwxbrUqRdNApF2c3bhtLAU0qA8Lrri0p0NauOsTWh3yCCCDyBOjESHMExonp7Nzc0w==",
+      "cpu": [
+        "riscv64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-riscv64-musl": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.2.tgz",
+      "integrity": "sha512-duBLgd+3pqC4MMwBrKkFxaZerUxZcYApQVC5SdbF5/e/589GwVvlRUnyqMFbM8iUSb1BaoX/3fRL7hB9m2Pj8Q==",
+      "cpu": [
+        "riscv64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-s390x-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.2.tgz",
+      "integrity": "sha512-tzhYJJidDUVGMgVyE+PmxENPHlvvqm1KILjjZhB8/xHYqAGeizh3GBGf9u6WdJpZrz1aCpIIHG0LgJgH9rVjHQ==",
+      "cpu": [
+        "s390x"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-x64-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.2.tgz",
+      "integrity": "sha512-opH8GSUuVcCSSyHHcl5hELrmnk4waZoVpgn/4FDao9iyE4WpQhyWJ5ryl5M3ocp4qkRuHfyXnGqg8M9oKCEKRA==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-linux-x64-musl": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.2.tgz",
+      "integrity": "sha512-LSeBHnGli1pPKVJ79ZVJgeZWWZXkEe/5o8kcn23M8eMKCUANejchJbF/JqzM4RRjOJfNRhKJk8FuqL1GKjF5oQ==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "linux"
+      ]
+    },
+    "node_modules/@rollup/rollup-openharmony-arm64": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.2.tgz",
+      "integrity": "sha512-uPj7MQ6/s+/GOpolavm6BPo+6CbhbKYyZHUDvZ/SmJM7pfDBgdGisFX3bY/CBDMg2ZO4utfhlApkSfZ92yXw7Q==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "openharmony"
+      ]
+    },
+    "node_modules/@rollup/rollup-win32-arm64-msvc": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.2.tgz",
+      "integrity": "sha512-Z9MUCrSgIaUeeHAiNkm3cQyst2UhzjPraR3gYYfOjAuZI7tcFRTOD+4cHLPoS/3qinchth+V56vtqz1Tv+6KPA==",
+      "cpu": [
+        "arm64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ]
+    },
+    "node_modules/@rollup/rollup-win32-ia32-msvc": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.2.tgz",
+      "integrity": "sha512-+GnYBmpjldD3XQd+HMejo+0gJGwYIOfFeoBQv32xF/RUIvccUz20/V6Otdv+57NE70D5pa8W/jVGDoGq0oON4A==",
+      "cpu": [
+        "ia32"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ]
+    },
+    "node_modules/@rollup/rollup-win32-x64-gnu": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.2.tgz",
+      "integrity": "sha512-ApXFKluSB6kDQkAqZOKXBjiaqdF1BlKi+/eqnYe9Ee7U2K3pUDKsIyr8EYm/QDHTJIM+4X+lI0gJc3TTRhd+dA==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ]
+    },
+    "node_modules/@rollup/rollup-win32-x64-msvc": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.2.tgz",
+      "integrity": "sha512-ARz+Bs8kY6FtitYM96PqPEVvPXqEZmPZsSkXvyX19YzDqkCaIlhCieLLMI5hxO9SRZ2XtCtm8wxhy0iJ2jxNfw==",
+      "cpu": [
+        "x64"
+      ],
+      "dev": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "win32"
+      ]
+    },
+    "node_modules/@swc/helpers": {
+      "version": "0.5.17",
+      "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.17.tgz",
+      "integrity": "sha512-5IKx/Y13RsYd+sauPb2x+U/xZikHjolzfuDgTAl/Tdf3Q8rslRvC19NKDLgAJQ6wsqADk10ntlv08nPFw/gO/A==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "tslib": "^2.8.0"
+      }
+    },
+    "node_modules/@types/babel__core": {
+      "version": "7.20.5",
+      "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+      "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/parser": "^7.20.7",
+        "@babel/types": "^7.20.7",
+        "@types/babel__generator": "*",
+        "@types/babel__template": "*",
+        "@types/babel__traverse": "*"
+      }
+    },
+    "node_modules/@types/babel__generator": {
+      "version": "7.27.0",
+      "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+      "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/types": "^7.0.0"
+      }
+    },
+    "node_modules/@types/babel__template": {
+      "version": "7.4.4",
+      "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+      "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/parser": "^7.1.0",
+        "@babel/types": "^7.0.0"
+      }
+    },
+    "node_modules/@types/babel__traverse": {
+      "version": "7.28.0",
+      "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+      "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/types": "^7.28.2"
+      }
+    },
+    "node_modules/@types/estree": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+      "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/@types/json-schema": {
+      "version": "7.0.15",
+      "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
+      "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/@types/prop-types": {
+      "version": "15.7.15",
+      "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz",
+      "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==",
+      "license": "MIT"
+    },
+    "node_modules/@types/react": {
+      "version": "19.1.13",
+      "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.13.tgz",
+      "integrity": "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ==",
+      "license": "MIT",
+      "dependencies": {
+        "csstype": "^3.0.2"
+      }
+    },
+    "node_modules/@types/react-dom": {
+      "version": "19.1.9",
+      "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.9.tgz",
+      "integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==",
+      "dev": true,
+      "license": "MIT",
+      "peerDependencies": {
+        "@types/react": "^19.0.0"
+      }
+    },
+    "node_modules/@types/react-transition-group": {
+      "version": "4.4.12",
+      "resolved": "https://registry.npmjs.org/@types/react-transition-group/-/react-transition-group-4.4.12.tgz",
+      "integrity": "sha512-8TV6R3h2j7a91c+1DXdJi3Syo69zzIZbz7Lg5tORM5LEJG7X/E6a1V3drRyBRZq7/utz7A+c4OgYLiLcYGHG6w==",
+      "license": "MIT",
+      "peerDependencies": {
+        "@types/react": "*"
+      }
+    },
+    "node_modules/@types/warning": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/@types/warning/-/warning-3.0.3.tgz",
+      "integrity": "sha512-D1XC7WK8K+zZEveUPY+cf4+kgauk8N4eHr/XIHXGlGYkHLud6hK9lYfZk1ry1TNh798cZUCgb6MqGEG8DkJt6Q==",
+      "license": "MIT"
+    },
+    "node_modules/@typescript-eslint/eslint-plugin": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.44.1.tgz",
+      "integrity": "sha512-molgphGqOBT7t4YKCSkbasmu1tb1MgrZ2szGzHbclF7PNmOkSTQVHy+2jXOSnxvR3+Xe1yySHFZoqMpz3TfQsw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@eslint-community/regexpp": "^4.10.0",
+        "@typescript-eslint/scope-manager": "8.44.1",
+        "@typescript-eslint/type-utils": "8.44.1",
+        "@typescript-eslint/utils": "8.44.1",
+        "@typescript-eslint/visitor-keys": "8.44.1",
+        "graphemer": "^1.4.0",
+        "ignore": "^7.0.0",
+        "natural-compare": "^1.4.0",
+        "ts-api-utils": "^2.1.0"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "@typescript-eslint/parser": "^8.44.1",
+        "eslint": "^8.57.0 || ^9.0.0",
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": {
+      "version": "7.0.5",
+      "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz",
+      "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 4"
+      }
+    },
+    "node_modules/@typescript-eslint/parser": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.44.1.tgz",
+      "integrity": "sha512-EHrrEsyhOhxYt8MTg4zTF+DJMuNBzWwgvvOYNj/zm1vnaD/IC5zCXFehZv94Piqa2cRFfXrTFxIvO95L7Qc/cw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/scope-manager": "8.44.1",
+        "@typescript-eslint/types": "8.44.1",
+        "@typescript-eslint/typescript-estree": "8.44.1",
+        "@typescript-eslint/visitor-keys": "8.44.1",
+        "debug": "^4.3.4"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^8.57.0 || ^9.0.0",
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/project-service": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.44.1.tgz",
+      "integrity": "sha512-ycSa60eGg8GWAkVsKV4E6Nz33h+HjTXbsDT4FILyL8Obk5/mx4tbvCNsLf9zret3ipSumAOG89UcCs/KRaKYrA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/tsconfig-utils": "^8.44.1",
+        "@typescript-eslint/types": "^8.44.1",
+        "debug": "^4.3.4"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/scope-manager": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.44.1.tgz",
+      "integrity": "sha512-NdhWHgmynpSvyhchGLXh+w12OMT308Gm25JoRIyTZqEbApiBiQHD/8xgb6LqCWCFcxFtWwaVdFsLPQI3jvhywg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/types": "8.44.1",
+        "@typescript-eslint/visitor-keys": "8.44.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      }
+    },
+    "node_modules/@typescript-eslint/tsconfig-utils": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.44.1.tgz",
+      "integrity": "sha512-B5OyACouEjuIvof3o86lRMvyDsFwZm+4fBOqFHccIctYgBjqR3qT39FBYGN87khcgf0ExpdCBeGKpKRhSFTjKQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/type-utils": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.44.1.tgz",
+      "integrity": "sha512-KdEerZqHWXsRNKjF9NYswNISnFzXfXNDfPxoTh7tqohU/PRIbwTmsjGK6V9/RTYWau7NZvfo52lgVk+sJh0K3g==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/types": "8.44.1",
+        "@typescript-eslint/typescript-estree": "8.44.1",
+        "@typescript-eslint/utils": "8.44.1",
+        "debug": "^4.3.4",
+        "ts-api-utils": "^2.1.0"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^8.57.0 || ^9.0.0",
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/types": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.44.1.tgz",
+      "integrity": "sha512-Lk7uj7y9uQUOEguiDIDLYLJOrYHQa7oBiURYVFqIpGxclAFQ78f6VUOM8lI2XEuNOKNB7XuvM2+2cMXAoq4ALQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      }
+    },
+    "node_modules/@typescript-eslint/typescript-estree": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.44.1.tgz",
+      "integrity": "sha512-qnQJ+mVa7szevdEyvfItbO5Vo+GfZ4/GZWWDRRLjrxYPkhM+6zYB2vRYwCsoJLzqFCdZT4mEqyJoyzkunsZ96A==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/project-service": "8.44.1",
+        "@typescript-eslint/tsconfig-utils": "8.44.1",
+        "@typescript-eslint/types": "8.44.1",
+        "@typescript-eslint/visitor-keys": "8.44.1",
+        "debug": "^4.3.4",
+        "fast-glob": "^3.3.2",
+        "is-glob": "^4.0.3",
+        "minimatch": "^9.0.4",
+        "semver": "^7.6.0",
+        "ts-api-utils": "^2.1.0"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+      "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "balanced-match": "^1.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": {
+      "version": "9.0.5",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+      "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "brace-expansion": "^2.0.1"
+      },
+      "engines": {
+        "node": ">=16 || 14 >=14.17"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/isaacs"
+      }
+    },
+    "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": {
+      "version": "7.7.2",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
+      "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
+      "dev": true,
+      "license": "ISC",
+      "bin": {
+        "semver": "bin/semver.js"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/@typescript-eslint/utils": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.44.1.tgz",
+      "integrity": "sha512-DpX5Fp6edTlocMCwA+mHY8Mra+pPjRZ0TfHkXI8QFelIKcbADQz1LUPNtzOFUriBB2UYqw4Pi9+xV4w9ZczHFg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@eslint-community/eslint-utils": "^4.7.0",
+        "@typescript-eslint/scope-manager": "8.44.1",
+        "@typescript-eslint/types": "8.44.1",
+        "@typescript-eslint/typescript-estree": "8.44.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^8.57.0 || ^9.0.0",
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/@typescript-eslint/visitor-keys": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.44.1.tgz",
+      "integrity": "sha512-576+u0QD+Jp3tZzvfRfxon0EA2lzcDt3lhUbsC6Lgzy9x2VR4E+JUiNyGHi5T8vk0TV+fpJ5GLG1JsJuWCaKhw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/types": "8.44.1",
+        "eslint-visitor-keys": "^4.2.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      }
+    },
+    "node_modules/@vitejs/plugin-react": {
+      "version": "5.0.3",
+      "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.0.3.tgz",
+      "integrity": "sha512-PFVHhosKkofGH0Yzrw1BipSedTH68BFF8ZWy1kfUpCtJcouXXY0+racG8sExw7hw0HoX36813ga5o3LTWZ4FUg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@babel/core": "^7.28.4",
+        "@babel/plugin-transform-react-jsx-self": "^7.27.1",
+        "@babel/plugin-transform-react-jsx-source": "^7.27.1",
+        "@rolldown/pluginutils": "1.0.0-beta.35",
+        "@types/babel__core": "^7.20.5",
+        "react-refresh": "^0.17.0"
+      },
+      "engines": {
+        "node": "^20.19.0 || >=22.12.0"
+      },
+      "peerDependencies": {
+        "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
+      }
+    },
+    "node_modules/acorn": {
+      "version": "8.15.0",
+      "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
+      "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
+      "dev": true,
+      "license": "MIT",
+      "bin": {
+        "acorn": "bin/acorn"
+      },
+      "engines": {
+        "node": ">=0.4.0"
+      }
+    },
+    "node_modules/acorn-jsx": {
+      "version": "5.3.2",
+      "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+      "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+      "dev": true,
+      "license": "MIT",
+      "peerDependencies": {
+        "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+      }
+    },
+    "node_modules/ajv": {
+      "version": "6.12.6",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+      "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "fast-deep-equal": "^3.1.1",
+        "fast-json-stable-stringify": "^2.0.0",
+        "json-schema-traverse": "^0.4.1",
+        "uri-js": "^4.2.2"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/epoberezkin"
+      }
+    },
+    "node_modules/ansi-styles": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+      "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "color-convert": "^2.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+      }
+    },
+    "node_modules/argparse": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+      "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+      "dev": true,
+      "license": "Python-2.0"
+    },
+    "node_modules/balanced-match": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+      "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/baseline-browser-mapping": {
+      "version": "2.8.6",
+      "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.6.tgz",
+      "integrity": "sha512-wrH5NNqren/QMtKUEEJf7z86YjfqW/2uw3IL3/xpqZUC95SSVIFXYQeeGjL6FT/X68IROu6RMehZQS5foy2BXw==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "bin": {
+        "baseline-browser-mapping": "dist/cli.js"
+      }
+    },
+    "node_modules/boostrap": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/boostrap/-/boostrap-2.0.0.tgz",
+      "integrity": "sha512-JEeFMOweKeGXEM9rt95eaVISOkluG9aKcl0jQCETOVH9jynCZxuBZe2oWgcWJpj5wqYWZl625SnW7OgHT2Ineg==",
+      "deprecated": "Package no longer supported. Contact support@npmjs.com for more info.",
+      "license": "ISC"
+    },
+    "node_modules/bootstrap": {
+      "version": "5.3.8",
+      "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-5.3.8.tgz",
+      "integrity": "sha512-HP1SZDqaLDPwsNiqRqi5NcP0SSXciX2s9E+RyqJIIqGo+vJeN5AJVM98CXmW/Wux0nQ5L7jeWUdplCEf0Ee+tg==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/twbs"
+        },
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/bootstrap"
+        }
+      ],
+      "license": "MIT",
+      "peerDependencies": {
+        "@popperjs/core": "^2.11.8"
+      }
+    },
+    "node_modules/brace-expansion": {
+      "version": "1.1.12",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+      "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "balanced-match": "^1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "node_modules/braces": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+      "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "fill-range": "^7.1.1"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/browserslist": {
+      "version": "4.26.2",
+      "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.26.2.tgz",
+      "integrity": "sha512-ECFzp6uFOSB+dcZ5BK/IBaGWssbSYBHvuMeMt3MMFyhI0Z8SqGgEkBLARgpRH3hutIgPVsALcMwbDrJqPxQ65A==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/browserslist"
+        },
+        {
+          "type": "tidelift",
+          "url": "https://tidelift.com/funding/github/npm/browserslist"
+        },
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "baseline-browser-mapping": "^2.8.3",
+        "caniuse-lite": "^1.0.30001741",
+        "electron-to-chromium": "^1.5.218",
+        "node-releases": "^2.0.21",
+        "update-browserslist-db": "^1.1.3"
+      },
+      "bin": {
+        "browserslist": "cli.js"
+      },
+      "engines": {
+        "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+      }
+    },
+    "node_modules/callsites": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+      "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/caniuse-lite": {
+      "version": "1.0.30001743",
+      "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001743.tgz",
+      "integrity": "sha512-e6Ojr7RV14Un7dz6ASD0aZDmQPT/A+eZU+nuTNfjqmRrmkmQlnTNWH0SKmqagx9PeW87UVqapSurtAXifmtdmw==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/browserslist"
+        },
+        {
+          "type": "tidelift",
+          "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+        },
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "CC-BY-4.0"
+    },
+    "node_modules/chalk": {
+      "version": "4.1.2",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+      "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "ansi-styles": "^4.1.0",
+        "supports-color": "^7.1.0"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/chalk?sponsor=1"
+      }
+    },
+    "node_modules/classnames": {
+      "version": "2.5.1",
+      "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz",
+      "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==",
+      "license": "MIT"
+    },
+    "node_modules/color-convert": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+      "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "color-name": "~1.1.4"
+      },
+      "engines": {
+        "node": ">=7.0.0"
+      }
+    },
+    "node_modules/color-name": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+      "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/concat-map": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+      "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/convert-source-map": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+      "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/cookie": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz",
+      "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/cross-spawn": {
+      "version": "7.0.6",
+      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+      "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "path-key": "^3.1.0",
+        "shebang-command": "^2.0.0",
+        "which": "^2.0.1"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/csstype": {
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
+      "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
+      "license": "MIT"
+    },
+    "node_modules/debug": {
+      "version": "4.4.3",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+      "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "ms": "^2.1.3"
+      },
+      "engines": {
+        "node": ">=6.0"
+      },
+      "peerDependenciesMeta": {
+        "supports-color": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/deep-is": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+      "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/dequal": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+      "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/dom-helpers": {
+      "version": "5.2.1",
+      "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz",
+      "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==",
+      "license": "MIT",
+      "dependencies": {
+        "@babel/runtime": "^7.8.7",
+        "csstype": "^3.0.2"
+      }
+    },
+    "node_modules/electron-to-chromium": {
+      "version": "1.5.223",
+      "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.223.tgz",
+      "integrity": "sha512-qKm55ic6nbEmagFlTFczML33rF90aU+WtrJ9MdTCThrcvDNdUHN4p6QfVN78U06ZmguqXIyMPyYhw2TrbDUwPQ==",
+      "dev": true,
+      "license": "ISC"
+    },
+    "node_modules/esbuild": {
+      "version": "0.25.10",
+      "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.10.tgz",
+      "integrity": "sha512-9RiGKvCwaqxO2owP61uQ4BgNborAQskMR6QusfWzQqv7AZOg5oGehdY2pRJMTKuwxd1IDBP4rSbI5lHzU7SMsQ==",
+      "dev": true,
+      "hasInstallScript": true,
+      "license": "MIT",
+      "bin": {
+        "esbuild": "bin/esbuild"
+      },
+      "engines": {
+        "node": ">=18"
+      },
+      "optionalDependencies": {
+        "@esbuild/aix-ppc64": "0.25.10",
+        "@esbuild/android-arm": "0.25.10",
+        "@esbuild/android-arm64": "0.25.10",
+        "@esbuild/android-x64": "0.25.10",
+        "@esbuild/darwin-arm64": "0.25.10",
+        "@esbuild/darwin-x64": "0.25.10",
+        "@esbuild/freebsd-arm64": "0.25.10",
+        "@esbuild/freebsd-x64": "0.25.10",
+        "@esbuild/linux-arm": "0.25.10",
+        "@esbuild/linux-arm64": "0.25.10",
+        "@esbuild/linux-ia32": "0.25.10",
+        "@esbuild/linux-loong64": "0.25.10",
+        "@esbuild/linux-mips64el": "0.25.10",
+        "@esbuild/linux-ppc64": "0.25.10",
+        "@esbuild/linux-riscv64": "0.25.10",
+        "@esbuild/linux-s390x": "0.25.10",
+        "@esbuild/linux-x64": "0.25.10",
+        "@esbuild/netbsd-arm64": "0.25.10",
+        "@esbuild/netbsd-x64": "0.25.10",
+        "@esbuild/openbsd-arm64": "0.25.10",
+        "@esbuild/openbsd-x64": "0.25.10",
+        "@esbuild/openharmony-arm64": "0.25.10",
+        "@esbuild/sunos-x64": "0.25.10",
+        "@esbuild/win32-arm64": "0.25.10",
+        "@esbuild/win32-ia32": "0.25.10",
+        "@esbuild/win32-x64": "0.25.10"
+      }
+    },
+    "node_modules/escalade": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+      "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/escape-string-regexp": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+      "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/eslint": {
+      "version": "9.36.0",
+      "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.36.0.tgz",
+      "integrity": "sha512-hB4FIzXovouYzwzECDcUkJ4OcfOEkXTv2zRY6B9bkwjx/cprAq0uvm1nl7zvQ0/TsUk0zQiN4uPfJpB9m+rPMQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@eslint-community/eslint-utils": "^4.8.0",
+        "@eslint-community/regexpp": "^4.12.1",
+        "@eslint/config-array": "^0.21.0",
+        "@eslint/config-helpers": "^0.3.1",
+        "@eslint/core": "^0.15.2",
+        "@eslint/eslintrc": "^3.3.1",
+        "@eslint/js": "9.36.0",
+        "@eslint/plugin-kit": "^0.3.5",
+        "@humanfs/node": "^0.16.6",
+        "@humanwhocodes/module-importer": "^1.0.1",
+        "@humanwhocodes/retry": "^0.4.2",
+        "@types/estree": "^1.0.6",
+        "@types/json-schema": "^7.0.15",
+        "ajv": "^6.12.4",
+        "chalk": "^4.0.0",
+        "cross-spawn": "^7.0.6",
+        "debug": "^4.3.2",
+        "escape-string-regexp": "^4.0.0",
+        "eslint-scope": "^8.4.0",
+        "eslint-visitor-keys": "^4.2.1",
+        "espree": "^10.4.0",
+        "esquery": "^1.5.0",
+        "esutils": "^2.0.2",
+        "fast-deep-equal": "^3.1.3",
+        "file-entry-cache": "^8.0.0",
+        "find-up": "^5.0.0",
+        "glob-parent": "^6.0.2",
+        "ignore": "^5.2.0",
+        "imurmurhash": "^0.1.4",
+        "is-glob": "^4.0.0",
+        "json-stable-stringify-without-jsonify": "^1.0.1",
+        "lodash.merge": "^4.6.2",
+        "minimatch": "^3.1.2",
+        "natural-compare": "^1.4.0",
+        "optionator": "^0.9.3"
+      },
+      "bin": {
+        "eslint": "bin/eslint.js"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "url": "https://eslint.org/donate"
+      },
+      "peerDependencies": {
+        "jiti": "*"
+      },
+      "peerDependenciesMeta": {
+        "jiti": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/eslint-plugin-react-hooks": {
+      "version": "5.2.0",
+      "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.2.0.tgz",
+      "integrity": "sha512-+f15FfK64YQwZdJNELETdn5ibXEUQmW1DZL6KXhNnc2heoy/sg9VJJeT7n8TlMWouzWqSWavFkIhHyIbIAEapg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=10"
+      },
+      "peerDependencies": {
+        "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0"
+      }
+    },
+    "node_modules/eslint-plugin-react-refresh": {
+      "version": "0.4.21",
+      "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.21.tgz",
+      "integrity": "sha512-MWDWTtNC4voTcWDxXbdmBNe8b/TxfxRFUL6hXgKWJjN9c1AagYEmpiFWBWzDw+5H3SulWUe1pJKTnoSdmk88UA==",
+      "dev": true,
+      "license": "MIT",
+      "peerDependencies": {
+        "eslint": ">=8.40"
+      }
+    },
+    "node_modules/eslint-scope": {
+      "version": "8.4.0",
+      "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz",
+      "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==",
+      "dev": true,
+      "license": "BSD-2-Clause",
+      "dependencies": {
+        "esrecurse": "^4.3.0",
+        "estraverse": "^5.2.0"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      }
+    },
+    "node_modules/eslint-visitor-keys": {
+      "version": "4.2.1",
+      "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz",
+      "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      }
+    },
+    "node_modules/espree": {
+      "version": "10.4.0",
+      "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz",
+      "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==",
+      "dev": true,
+      "license": "BSD-2-Clause",
+      "dependencies": {
+        "acorn": "^8.15.0",
+        "acorn-jsx": "^5.3.2",
+        "eslint-visitor-keys": "^4.2.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/eslint"
+      }
+    },
+    "node_modules/esquery": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz",
+      "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==",
+      "dev": true,
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "estraverse": "^5.1.0"
+      },
+      "engines": {
+        "node": ">=0.10"
+      }
+    },
+    "node_modules/esrecurse": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+      "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+      "dev": true,
+      "license": "BSD-2-Clause",
+      "dependencies": {
+        "estraverse": "^5.2.0"
+      },
+      "engines": {
+        "node": ">=4.0"
+      }
+    },
+    "node_modules/estraverse": {
+      "version": "5.3.0",
+      "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+      "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+      "dev": true,
+      "license": "BSD-2-Clause",
+      "engines": {
+        "node": ">=4.0"
+      }
+    },
+    "node_modules/esutils": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+      "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+      "dev": true,
+      "license": "BSD-2-Clause",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/fast-deep-equal": {
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+      "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/fast-glob": {
+      "version": "3.3.3",
+      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
+      "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@nodelib/fs.stat": "^2.0.2",
+        "@nodelib/fs.walk": "^1.2.3",
+        "glob-parent": "^5.1.2",
+        "merge2": "^1.3.0",
+        "micromatch": "^4.0.8"
+      },
+      "engines": {
+        "node": ">=8.6.0"
+      }
+    },
+    "node_modules/fast-glob/node_modules/glob-parent": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+      "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "is-glob": "^4.0.1"
+      },
+      "engines": {
+        "node": ">= 6"
+      }
+    },
+    "node_modules/fast-json-stable-stringify": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+      "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/fast-levenshtein": {
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+      "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/fastq": {
+      "version": "1.19.1",
+      "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz",
+      "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "reusify": "^1.0.4"
+      }
+    },
+    "node_modules/file-entry-cache": {
+      "version": "8.0.0",
+      "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz",
+      "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "flat-cache": "^4.0.0"
+      },
+      "engines": {
+        "node": ">=16.0.0"
+      }
+    },
+    "node_modules/fill-range": {
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+      "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "to-regex-range": "^5.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/find-up": {
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+      "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "locate-path": "^6.0.0",
+        "path-exists": "^4.0.0"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/flat-cache": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz",
+      "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "flatted": "^3.2.9",
+        "keyv": "^4.5.4"
+      },
+      "engines": {
+        "node": ">=16"
+      }
+    },
+    "node_modules/flatted": {
+      "version": "3.3.3",
+      "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz",
+      "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==",
+      "dev": true,
+      "license": "ISC"
+    },
+    "node_modules/fsevents": {
+      "version": "2.3.3",
+      "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+      "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+      "dev": true,
+      "hasInstallScript": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+      }
+    },
+    "node_modules/gensync": {
+      "version": "1.0.0-beta.2",
+      "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+      "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/glob-parent": {
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+      "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "is-glob": "^4.0.3"
+      },
+      "engines": {
+        "node": ">=10.13.0"
+      }
+    },
+    "node_modules/globals": {
+      "version": "16.4.0",
+      "resolved": "https://registry.npmjs.org/globals/-/globals-16.4.0.tgz",
+      "integrity": "sha512-ob/2LcVVaVGCYN+r14cnwnoDPUufjiYgSqRhiFD0Q1iI4Odora5RE8Iv1D24hAz5oMophRGkGz+yuvQmmUMnMw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/graphemer": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
+      "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/has-flag": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+      "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/ignore": {
+      "version": "5.3.2",
+      "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz",
+      "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 4"
+      }
+    },
+    "node_modules/import-fresh": {
+      "version": "3.3.1",
+      "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz",
+      "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "parent-module": "^1.0.0",
+        "resolve-from": "^4.0.0"
+      },
+      "engines": {
+        "node": ">=6"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/imurmurhash": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+      "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.8.19"
+      }
+    },
+    "node_modules/invariant": {
+      "version": "2.2.4",
+      "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
+      "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
+      "license": "MIT",
+      "dependencies": {
+        "loose-envify": "^1.0.0"
+      }
+    },
+    "node_modules/is-extglob": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+      "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/is-glob": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+      "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "is-extglob": "^2.1.1"
+      },
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/is-number": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+      "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.12.0"
+      }
+    },
+    "node_modules/isexe": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+      "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+      "dev": true,
+      "license": "ISC"
+    },
+    "node_modules/js-tokens": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+      "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+      "license": "MIT"
+    },
+    "node_modules/js-yaml": {
+      "version": "4.1.0",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+      "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "argparse": "^2.0.1"
+      },
+      "bin": {
+        "js-yaml": "bin/js-yaml.js"
+      }
+    },
+    "node_modules/jsesc": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+      "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+      "dev": true,
+      "license": "MIT",
+      "bin": {
+        "jsesc": "bin/jsesc"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/json-buffer": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
+      "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/json-schema-traverse": {
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+      "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/json-stable-stringify-without-jsonify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+      "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/json5": {
+      "version": "2.2.3",
+      "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+      "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+      "dev": true,
+      "license": "MIT",
+      "bin": {
+        "json5": "lib/cli.js"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/keyv": {
+      "version": "4.5.4",
+      "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
+      "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "json-buffer": "3.0.1"
+      }
+    },
+    "node_modules/levn": {
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+      "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "prelude-ls": "^1.2.1",
+        "type-check": "~0.4.0"
+      },
+      "engines": {
+        "node": ">= 0.8.0"
+      }
+    },
+    "node_modules/locate-path": {
+      "version": "6.0.0",
+      "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+      "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "p-locate": "^5.0.0"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/lodash.merge": {
+      "version": "4.6.2",
+      "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+      "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/loose-envify": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+      "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+      "license": "MIT",
+      "dependencies": {
+        "js-tokens": "^3.0.0 || ^4.0.0"
+      },
+      "bin": {
+        "loose-envify": "cli.js"
+      }
+    },
+    "node_modules/lru-cache": {
+      "version": "5.1.1",
+      "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+      "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "yallist": "^3.0.2"
+      }
+    },
+    "node_modules/merge2": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+      "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/micromatch": {
+      "version": "4.0.8",
+      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+      "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "braces": "^3.0.3",
+        "picomatch": "^2.3.1"
+      },
+      "engines": {
+        "node": ">=8.6"
+      }
+    },
+    "node_modules/minimatch": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+      "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "brace-expansion": "^1.1.7"
+      },
+      "engines": {
+        "node": "*"
+      }
+    },
+    "node_modules/ms": {
+      "version": "2.1.3",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+      "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/nanoid": {
+      "version": "3.3.11",
+      "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+      "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "MIT",
+      "bin": {
+        "nanoid": "bin/nanoid.cjs"
+      },
+      "engines": {
+        "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+      }
+    },
+    "node_modules/natural-compare": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+      "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/node-releases": {
+      "version": "2.0.21",
+      "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.21.tgz",
+      "integrity": "sha512-5b0pgg78U3hwXkCM8Z9b2FJdPZlr9Psr9V2gQPESdGHqbntyFJKFW4r5TeWGFzafGY3hzs1JC62VEQMbl1JFkw==",
+      "dev": true,
+      "license": "MIT"
+    },
+    "node_modules/object-assign": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+      "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/optionator": {
+      "version": "0.9.4",
+      "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
+      "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "deep-is": "^0.1.3",
+        "fast-levenshtein": "^2.0.6",
+        "levn": "^0.4.1",
+        "prelude-ls": "^1.2.1",
+        "type-check": "^0.4.0",
+        "word-wrap": "^1.2.5"
+      },
+      "engines": {
+        "node": ">= 0.8.0"
+      }
+    },
+    "node_modules/p-limit": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+      "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "yocto-queue": "^0.1.0"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/p-locate": {
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+      "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "p-limit": "^3.0.2"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/parent-module": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+      "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "callsites": "^3.0.0"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/path-exists": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+      "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/path-key": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+      "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/picocolors": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+      "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+      "dev": true,
+      "license": "ISC"
+    },
+    "node_modules/picomatch": {
+      "version": "2.3.1",
+      "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+      "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=8.6"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/jonschlinkert"
+      }
+    },
+    "node_modules/postcss": {
+      "version": "8.5.6",
+      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+      "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/postcss/"
+        },
+        {
+          "type": "tidelift",
+          "url": "https://tidelift.com/funding/github/npm/postcss"
+        },
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "nanoid": "^3.3.11",
+        "picocolors": "^1.1.1",
+        "source-map-js": "^1.2.1"
+      },
+      "engines": {
+        "node": "^10 || ^12 || >=14"
+      }
+    },
+    "node_modules/prelude-ls": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+      "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.8.0"
+      }
+    },
+    "node_modules/prop-types": {
+      "version": "15.8.1",
+      "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+      "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+      "license": "MIT",
+      "dependencies": {
+        "loose-envify": "^1.4.0",
+        "object-assign": "^4.1.1",
+        "react-is": "^16.13.1"
+      }
+    },
+    "node_modules/prop-types-extra": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/prop-types-extra/-/prop-types-extra-1.1.1.tgz",
+      "integrity": "sha512-59+AHNnHYCdiC+vMwY52WmvP5dM3QLeoumYuEyceQDi9aEhtwN9zIQ2ZNo25sMyXnbh32h+P1ezDsUpUH3JAew==",
+      "license": "MIT",
+      "dependencies": {
+        "react-is": "^16.3.2",
+        "warning": "^4.0.0"
+      },
+      "peerDependencies": {
+        "react": ">=0.14.0"
+      }
+    },
+    "node_modules/punycode": {
+      "version": "2.3.1",
+      "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+      "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/queue-microtask": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+      "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT"
+    },
+    "node_modules/react": {
+      "version": "19.1.1",
+      "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz",
+      "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/react-bootstrap": {
+      "version": "2.10.10",
+      "resolved": "https://registry.npmjs.org/react-bootstrap/-/react-bootstrap-2.10.10.tgz",
+      "integrity": "sha512-gMckKUqn8aK/vCnfwoBpBVFUGT9SVQxwsYrp9yDHt0arXMamxALerliKBxr1TPbntirK/HGrUAHYbAeQTa9GHQ==",
+      "license": "MIT",
+      "dependencies": {
+        "@babel/runtime": "^7.24.7",
+        "@restart/hooks": "^0.4.9",
+        "@restart/ui": "^1.9.4",
+        "@types/prop-types": "^15.7.12",
+        "@types/react-transition-group": "^4.4.6",
+        "classnames": "^2.3.2",
+        "dom-helpers": "^5.2.1",
+        "invariant": "^2.2.4",
+        "prop-types": "^15.8.1",
+        "prop-types-extra": "^1.1.0",
+        "react-transition-group": "^4.4.5",
+        "uncontrollable": "^7.2.1",
+        "warning": "^4.0.3"
+      },
+      "peerDependencies": {
+        "@types/react": ">=16.14.8",
+        "react": ">=16.14.0",
+        "react-dom": ">=16.14.0"
+      },
+      "peerDependenciesMeta": {
+        "@types/react": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/react-dom": {
+      "version": "19.1.1",
+      "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz",
+      "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==",
+      "license": "MIT",
+      "dependencies": {
+        "scheduler": "^0.26.0"
+      },
+      "peerDependencies": {
+        "react": "^19.1.1"
+      }
+    },
+    "node_modules/react-is": {
+      "version": "16.13.1",
+      "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+      "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
+      "license": "MIT"
+    },
+    "node_modules/react-lifecycles-compat": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz",
+      "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==",
+      "license": "MIT"
+    },
+    "node_modules/react-refresh": {
+      "version": "0.17.0",
+      "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
+      "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/react-router": {
+      "version": "7.9.2",
+      "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.9.2.tgz",
+      "integrity": "sha512-i2TPp4dgaqrOqiRGLZmqh2WXmbdFknUyiCRmSKs0hf6fWXkTKg5h56b+9F22NbGRAMxjYfqQnpi63egzD2SuZA==",
+      "license": "MIT",
+      "dependencies": {
+        "cookie": "^1.0.1",
+        "set-cookie-parser": "^2.6.0"
+      },
+      "engines": {
+        "node": ">=20.0.0"
+      },
+      "peerDependencies": {
+        "react": ">=18",
+        "react-dom": ">=18"
+      },
+      "peerDependenciesMeta": {
+        "react-dom": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/react-transition-group": {
+      "version": "4.4.5",
+      "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz",
+      "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@babel/runtime": "^7.5.5",
+        "dom-helpers": "^5.0.1",
+        "loose-envify": "^1.4.0",
+        "prop-types": "^15.6.2"
+      },
+      "peerDependencies": {
+        "react": ">=16.6.0",
+        "react-dom": ">=16.6.0"
+      }
+    },
+    "node_modules/resolve-from": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+      "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/reusify": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
+      "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "iojs": ">=1.0.0",
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/rollup": {
+      "version": "4.52.2",
+      "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.52.2.tgz",
+      "integrity": "sha512-I25/2QgoROE1vYV+NQ1En9T9UFB9Cmfm2CJ83zZOlaDpvz29wGQSZXWKw7MiNXau7wYgB/T9fVIdIuEQ+KbiiA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@types/estree": "1.0.8"
+      },
+      "bin": {
+        "rollup": "dist/bin/rollup"
+      },
+      "engines": {
+        "node": ">=18.0.0",
+        "npm": ">=8.0.0"
+      },
+      "optionalDependencies": {
+        "@rollup/rollup-android-arm-eabi": "4.52.2",
+        "@rollup/rollup-android-arm64": "4.52.2",
+        "@rollup/rollup-darwin-arm64": "4.52.2",
+        "@rollup/rollup-darwin-x64": "4.52.2",
+        "@rollup/rollup-freebsd-arm64": "4.52.2",
+        "@rollup/rollup-freebsd-x64": "4.52.2",
+        "@rollup/rollup-linux-arm-gnueabihf": "4.52.2",
+        "@rollup/rollup-linux-arm-musleabihf": "4.52.2",
+        "@rollup/rollup-linux-arm64-gnu": "4.52.2",
+        "@rollup/rollup-linux-arm64-musl": "4.52.2",
+        "@rollup/rollup-linux-loong64-gnu": "4.52.2",
+        "@rollup/rollup-linux-ppc64-gnu": "4.52.2",
+        "@rollup/rollup-linux-riscv64-gnu": "4.52.2",
+        "@rollup/rollup-linux-riscv64-musl": "4.52.2",
+        "@rollup/rollup-linux-s390x-gnu": "4.52.2",
+        "@rollup/rollup-linux-x64-gnu": "4.52.2",
+        "@rollup/rollup-linux-x64-musl": "4.52.2",
+        "@rollup/rollup-openharmony-arm64": "4.52.2",
+        "@rollup/rollup-win32-arm64-msvc": "4.52.2",
+        "@rollup/rollup-win32-ia32-msvc": "4.52.2",
+        "@rollup/rollup-win32-x64-gnu": "4.52.2",
+        "@rollup/rollup-win32-x64-msvc": "4.52.2",
+        "fsevents": "~2.3.2"
+      }
+    },
+    "node_modules/run-parallel": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+      "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "queue-microtask": "^1.2.2"
+      }
+    },
+    "node_modules/scheduler": {
+      "version": "0.26.0",
+      "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz",
+      "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==",
+      "license": "MIT"
+    },
+    "node_modules/semver": {
+      "version": "6.3.1",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+      "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+      "dev": true,
+      "license": "ISC",
+      "bin": {
+        "semver": "bin/semver.js"
+      }
+    },
+    "node_modules/set-cookie-parser": {
+      "version": "2.7.1",
+      "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz",
+      "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==",
+      "license": "MIT"
+    },
+    "node_modules/shebang-command": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+      "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "shebang-regex": "^3.0.0"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/shebang-regex": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+      "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/source-map-js": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+      "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+      "dev": true,
+      "license": "BSD-3-Clause",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/strip-json-comments": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+      "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/supports-color": {
+      "version": "7.2.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+      "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "has-flag": "^4.0.0"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/tinyglobby": {
+      "version": "0.2.15",
+      "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
+      "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "fdir": "^6.5.0",
+        "picomatch": "^4.0.3"
+      },
+      "engines": {
+        "node": ">=12.0.0"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/SuperchupuDev"
+      }
+    },
+    "node_modules/tinyglobby/node_modules/fdir": {
+      "version": "6.5.0",
+      "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+      "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=12.0.0"
+      },
+      "peerDependencies": {
+        "picomatch": "^3 || ^4"
+      },
+      "peerDependenciesMeta": {
+        "picomatch": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/tinyglobby/node_modules/picomatch": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+      "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=12"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/jonschlinkert"
+      }
+    },
+    "node_modules/to-regex-range": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+      "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "is-number": "^7.0.0"
+      },
+      "engines": {
+        "node": ">=8.0"
+      }
+    },
+    "node_modules/ts-api-utils": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz",
+      "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=18.12"
+      },
+      "peerDependencies": {
+        "typescript": ">=4.8.4"
+      }
+    },
+    "node_modules/tslib": {
+      "version": "2.8.1",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+      "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+      "license": "0BSD"
+    },
+    "node_modules/type-check": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+      "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "prelude-ls": "^1.2.1"
+      },
+      "engines": {
+        "node": ">= 0.8.0"
+      }
+    },
+    "node_modules/typescript": {
+      "version": "5.8.3",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz",
+      "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "bin": {
+        "tsc": "bin/tsc",
+        "tsserver": "bin/tsserver"
+      },
+      "engines": {
+        "node": ">=14.17"
+      }
+    },
+    "node_modules/typescript-eslint": {
+      "version": "8.44.1",
+      "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.44.1.tgz",
+      "integrity": "sha512-0ws8uWGrUVTjEeN2OM4K1pLKHK/4NiNP/vz6ns+LjT/6sqpaYzIVFajZb1fj/IDwpsrrHb3Jy0Qm5u9CPcKaeg==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "@typescript-eslint/eslint-plugin": "8.44.1",
+        "@typescript-eslint/parser": "8.44.1",
+        "@typescript-eslint/typescript-estree": "8.44.1",
+        "@typescript-eslint/utils": "8.44.1"
+      },
+      "engines": {
+        "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/typescript-eslint"
+      },
+      "peerDependencies": {
+        "eslint": "^8.57.0 || ^9.0.0",
+        "typescript": ">=4.8.4 <6.0.0"
+      }
+    },
+    "node_modules/uncontrollable": {
+      "version": "7.2.1",
+      "resolved": "https://registry.npmjs.org/uncontrollable/-/uncontrollable-7.2.1.tgz",
+      "integrity": "sha512-svtcfoTADIB0nT9nltgjujTi7BzVmwjZClOmskKu/E8FW9BXzg9os8OLr4f8Dlnk0rYWJIWr4wv9eKUXiQvQwQ==",
+      "license": "MIT",
+      "dependencies": {
+        "@babel/runtime": "^7.6.3",
+        "@types/react": ">=16.9.11",
+        "invariant": "^2.2.4",
+        "react-lifecycles-compat": "^3.0.4"
+      },
+      "peerDependencies": {
+        "react": ">=15.0.0"
+      }
+    },
+    "node_modules/update-browserslist-db": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz",
+      "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==",
+      "dev": true,
+      "funding": [
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/browserslist"
+        },
+        {
+          "type": "tidelift",
+          "url": "https://tidelift.com/funding/github/npm/browserslist"
+        },
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "escalade": "^3.2.0",
+        "picocolors": "^1.1.1"
+      },
+      "bin": {
+        "update-browserslist-db": "cli.js"
+      },
+      "peerDependencies": {
+        "browserslist": ">= 4.21.0"
+      }
+    },
+    "node_modules/uri-js": {
+      "version": "4.4.1",
+      "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+      "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+      "dev": true,
+      "license": "BSD-2-Clause",
+      "dependencies": {
+        "punycode": "^2.1.0"
+      }
+    },
+    "node_modules/vite": {
+      "version": "7.1.7",
+      "resolved": "https://registry.npmjs.org/vite/-/vite-7.1.7.tgz",
+      "integrity": "sha512-VbA8ScMvAISJNJVbRDTJdCwqQoAareR/wutevKanhR2/1EkoXVZVkkORaYm/tNVCjP/UDTKtcw3bAkwOUdedmA==",
+      "dev": true,
+      "license": "MIT",
+      "dependencies": {
+        "esbuild": "^0.25.0",
+        "fdir": "^6.5.0",
+        "picomatch": "^4.0.3",
+        "postcss": "^8.5.6",
+        "rollup": "^4.43.0",
+        "tinyglobby": "^0.2.15"
+      },
+      "bin": {
+        "vite": "bin/vite.js"
+      },
+      "engines": {
+        "node": "^20.19.0 || >=22.12.0"
+      },
+      "funding": {
+        "url": "https://github.com/vitejs/vite?sponsor=1"
+      },
+      "optionalDependencies": {
+        "fsevents": "~2.3.3"
+      },
+      "peerDependencies": {
+        "@types/node": "^20.19.0 || >=22.12.0",
+        "jiti": ">=1.21.0",
+        "less": "^4.0.0",
+        "lightningcss": "^1.21.0",
+        "sass": "^1.70.0",
+        "sass-embedded": "^1.70.0",
+        "stylus": ">=0.54.8",
+        "sugarss": "^5.0.0",
+        "terser": "^5.16.0",
+        "tsx": "^4.8.1",
+        "yaml": "^2.4.2"
+      },
+      "peerDependenciesMeta": {
+        "@types/node": {
+          "optional": true
+        },
+        "jiti": {
+          "optional": true
+        },
+        "less": {
+          "optional": true
+        },
+        "lightningcss": {
+          "optional": true
+        },
+        "sass": {
+          "optional": true
+        },
+        "sass-embedded": {
+          "optional": true
+        },
+        "stylus": {
+          "optional": true
+        },
+        "sugarss": {
+          "optional": true
+        },
+        "terser": {
+          "optional": true
+        },
+        "tsx": {
+          "optional": true
+        },
+        "yaml": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/vite/node_modules/fdir": {
+      "version": "6.5.0",
+      "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
+      "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=12.0.0"
+      },
+      "peerDependencies": {
+        "picomatch": "^3 || ^4"
+      },
+      "peerDependenciesMeta": {
+        "picomatch": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/vite/node_modules/picomatch": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
+      "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=12"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/jonschlinkert"
+      }
+    },
+    "node_modules/warning": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz",
+      "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==",
+      "license": "MIT",
+      "dependencies": {
+        "loose-envify": "^1.0.0"
+      }
+    },
+    "node_modules/which": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+      "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+      "dev": true,
+      "license": "ISC",
+      "dependencies": {
+        "isexe": "^2.0.0"
+      },
+      "bin": {
+        "node-which": "bin/node-which"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/word-wrap": {
+      "version": "1.2.5",
+      "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz",
+      "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/yallist": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+      "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+      "dev": true,
+      "license": "ISC"
+    },
+    "node_modules/yocto-queue": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+      "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+      "dev": true,
+      "license": "MIT",
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    }
+  }
+}
diff -pruN 8.17.2-2/examples/quotes/package.json 9.2.0-1/examples/quotes/package.json
--- 8.17.2-2/examples/quotes/package.json	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/package.json	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,36 @@
+{
+  "name": "quotes",
+  "private": true,
+  "version": "0.0.0",
+  "type": "module",
+  "scripts": {
+    "dev": "vite",
+    "build": "tsc -b && vite build",
+    "lint": "eslint .",
+    "preview": "vite preview",
+    "postinstall": "cd backend && python -m venv .venv && .venv/bin/pip install -r requirements.txt",
+    "backend": "cd backend && .venv/bin/uvicorn --port 5000 --reload quotes:app",
+    "ingest": "cd backend && .venv/bin/python quotes.py"
+  },
+  "dependencies": {
+    "boostrap": "^2.0.0",
+    "bootstrap": "^5.3.8",
+    "react": "^19.1.1",
+    "react-bootstrap": "^2.10.10",
+    "react-dom": "^19.1.1",
+    "react-router": "^7.9.2"
+  },
+  "devDependencies": {
+    "@eslint/js": "^9.36.0",
+    "@types/react": "^19.1.13",
+    "@types/react-dom": "^19.1.9",
+    "@vitejs/plugin-react": "^5.0.3",
+    "eslint": "^9.36.0",
+    "eslint-plugin-react-hooks": "^5.2.0",
+    "eslint-plugin-react-refresh": "^0.4.20",
+    "globals": "^16.4.0",
+    "typescript": "~5.8.3",
+    "typescript-eslint": "^8.44.0",
+    "vite": "^7.1.7"
+  }
+}
Binary files 8.17.2-2/examples/quotes/screenshot.png and 9.2.0-1/examples/quotes/screenshot.png differ
diff -pruN 8.17.2-2/examples/quotes/src/App.tsx 9.2.0-1/examples/quotes/src/App.tsx
--- 8.17.2-2/examples/quotes/src/App.tsx	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/src/App.tsx	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,172 @@
+import React, { useRef, useState, useEffect } from 'react';
+import Container from 'react-bootstrap/Container';
+import { NavLink } from 'react-router';
+import Row from 'react-bootstrap/Row';
+import Col from 'react-bootstrap/Col';
+import Form from 'react-bootstrap/Form';
+import Button from 'react-bootstrap/Button';
+import Spinner from 'react-bootstrap/Spinner';
+import Stack from 'react-bootstrap/Stack';
+import CloseButton from 'react-bootstrap/CloseButton';
+import ToggleButton from 'react-bootstrap/ToggleButton';
+import Pagination from 'react-bootstrap/Pagination';
+import type { Quote, Tag } from './models';
+
+export default function App() {
+  const inputRef = useRef<HTMLInputElement>(null);
+  const [knn, setKnn] = useState<boolean>(true);
+  const [query, setQuery] = useState<string>('');
+  const [filters, setFilters] = useState<string[]>([]);
+  const [tags, setTags] = useState<Tag[] | null>([]);
+  const [results, setResults] = useState<Quote[] | null>(null);
+  const [start, setStart] = useState<number>(0);
+  const [total, setTotal] = useState<number>(0);
+
+  useEffect(() => {
+    if (inputRef.current) {
+      inputRef.current.value = query;
+    }
+  }, [query]);
+
+  const onSearch = (ev: React.FormEvent) => {
+    ev.preventDefault();
+    setQuery(inputRef.current?.value || '');
+    setStart(0);
+  };
+
+  const onResetQuery = () => {
+    setQuery('');
+    setStart(0);
+    setTags(null);
+    setResults(null);
+    inputRef.current?.focus();
+  };
+
+  const onResetFilters = () => {
+    setFilters([]);
+    setStart(0);
+    setTags(null);
+    setResults(null);
+    inputRef.current?.focus();
+  };
+
+  const onFilter = ({tag, count}: Tag) => {
+    setFilters([...filters, tag]);
+    setStart(0);
+    setTags(null);
+    setResults(null);
+  };
+
+  useEffect(() => {
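+    // re-run the search against the backend whenever the query, tag filters, knn toggle, or pagination changes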
+    (async () => {
+      const response = await fetch('/api/search', {
+        method: 'POST',
+        headers: {'Content-Type': 'application/json'},
+        body: JSON.stringify({query, filters, knn, start}),
+      });
+      const data: {quotes: Quote[], tags: Tag[], start: number, total: number} = await response.json();
+      setResults(data.quotes);
+      setTags(data.tags.filter(tag => !filters.includes(tag.tag)));
+      setStart(data.start);
+      setTotal(data.total);
+      window.scrollTo(0, 0);
+    })()
+  }, [query, filters, knn, start, total]);
+
+  return (
+    <Container fluid="md" className="App">
+      <Row>
+        <h1>Elasticsearch + Pydantic Demo</h1>
+        <Form onSubmit={onSearch} className="SearchForm">
+          <Stack direction="horizontal" gap={2}>
+            <Form.Control type="text" placeholder="Search for... ?" ref={inputRef} autoFocus={true} />
+            <CloseButton onClick={onResetQuery} disabled={query === ''} />
+            <ToggleButton id="knn" type="checkbox" variant="outline-secondary" checked={knn} value="1" title={knn ? "Using hybrid search" : "Using full-text search"} onChange={e => setKnn(e.currentTarget.checked)}>
+              <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 256 256"><path d="M192.5,171.47A88.34,88.34,0,0,0,224,101.93c-1-45.71-37.61-83.4-83.24-85.8A88,88,0,0,0,48,102L25.55,145.18c-.09.18-.18.36-.26.54a16,16,0,0,0,7.55,20.62l.25.11L56,176.94V208a16,16,0,0,0,16,16h48a8,8,0,0,0,0-16H72V171.81a8,8,0,0,0-4.67-7.28L40,152l23.07-44.34A7.9,7.9,0,0,0,64,104a72,72,0,0,1,56-70.21V49.38a24,24,0,1,0,16,0V32c1.3,0,2.6,0,3.9.1A72.26,72.26,0,0,1,203.84,80H184a8,8,0,0,0-6.15,2.88L152.34,113.5a24.06,24.06,0,1,0,12.28,10.25L187.75,96h19.79q.36,3.12.44,6.3a72.26,72.26,0,0,1-28.78,59.3,8,8,0,0,0-3.14,7.39l8,64a8,8,0,0,0,7.93,7,8.39,8.39,0,0,0,1-.06,8,8,0,0,0,6.95-8.93ZM128,80a8,8,0,1,1,8-8A8,8,0,0,1,128,80Zm16,64a8,8,0,1,1,8-8A8,8,0,0,1,144,144Z"></path></svg>
+            </ToggleButton>
+            <NavLink to="/quotes/new" className="btn btn-primary">New&nbsp;Quote</NavLink>
+          </Stack>
+        </Form>
+      </Row>
+      <Row>
+        <Col md={3} className="Tags">
+          <>
+            {filters != null && (
+              <>
+                {filters.map(tag => (
+                  <div key={tag} className="Filter">
+                    » {tag}
+                  </div>
+                ))}
+                {(filters.length > 0) && (
+                  <>
+                    <Button variant="link" onClick={onResetFilters}>Reset</Button>
+                    <hr />
+                  </>
+                )}
+              </>
+            )}
+            {(tags === null) ?
+              <Spinner animation="border" role="status">
+                <span className="visually-hidden">Loading...</span>
+              </Spinner>
+            :
+              <>
+                {(tags.length === 0) ?
+                  <p>No tags.</p>
+                :
+                  <>
+                    {tags.map(({tag, count}) => (
+                      <div key={tag} className="Tag">
+                        <Button variant="link" onClick={() => onFilter({tag, count})}>{tag}</Button> ({count})
+                      </div>
+                    ))}
+                  </>
+                }
+              </>
+            }
+          </>
+        </Col>
+        <Col md={9} className="Results">
+          {(results === null) ?
+            <Spinner animation="border" role="status">
+              <span className="visually-hidden">Loading...</span>
+            </Spinner>
+          :
+            <>
+              {(results.length === 0) ?
+                <p>No results. Sorry!</p>
+              :
+                <>
+                  <p className="Summary">Showing results {start + 1}-{start + results.length} of {total}.</p>
+                  {results.map(({quote, author, tags, meta}, index) => (
+                    <div key={index} className="Result">
+                      <p>
+                        <span className="ResultQuote">{quote}</span> — <span className="ResultAuthor">{author}</span>
+                        <br />
+                        <span className="ResultScore">[Score: {meta.score}]</span> <span className="ResultTags">{tags.map(tag => `#${tag}`).join(', ')}</span>
+                        <br />
+                        <small><NavLink to={`/quotes/${meta.id}`}>Edit</NavLink></small>
+                      </p>
+                    </div>
+                  ))}
+                </>
+              }
+              <Pagination>
+                {start > 0 && 
+                  <>
+                    <Pagination.First onClick={() => setStart(0)} />
+                    <Pagination.Prev onClick={() => setStart(start - results.length)} />
+                  </>
+                }
+                {results.length > 0 && start + results.length < total &&
+                  <Pagination.Next onClick={() => setStart(start + results.length)} />
+                }
+              </Pagination>
+            </>
+          }
+        </Col>
+      </Row>
+    </Container>
+  );
+}
diff -pruN 8.17.2-2/examples/quotes/src/Quote.tsx 9.2.0-1/examples/quotes/src/Quote.tsx
--- 8.17.2-2/examples/quotes/src/Quote.tsx	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/src/Quote.tsx	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,104 @@
+import { useState, useEffect, useRef } from 'react';
+import type { MouseEvent } from 'react';
+import { useParams, NavLink, useNavigate } from 'react-router';
+import Container from 'react-bootstrap/Container';
+import Stack from 'react-bootstrap/Stack';
+import Form from 'react-bootstrap/Form';
+import Button from 'react-bootstrap/Button';
+import Toast from 'react-bootstrap/Toast';
+import ToastContainer from 'react-bootstrap/ToastContainer';
+import type { Quote } from './models';
+
+export default function Quote() {
+  const [quote, setQuote] = useState<Quote | null | undefined>(undefined);
+  const [status, setStatus] = useState<string>("");
+  const params = useParams();
+  const navigate = useNavigate();
+  const quoteField = useRef<HTMLInputElement>(null);
+  const authorField = useRef<HTMLInputElement>(null);
+  const tagsField = useRef<HTMLInputElement>(null);
+
+  useEffect(() => {
+    (async () => {
+      const response = await fetch(`/api/quotes/${params.id}`);
+      const data = await response.json();
+      setQuote(data);
+    })();
+  }, []);
+
+  const onSubmit = async (ev: MouseEvent<HTMLElement>) => {
+    ev.preventDefault();
+    if (quote) {
+      quote.quote = quoteField.current?.value ?? '';
+      quote.author = authorField.current?.value ?? '';
+      quote.tags = (tagsField.current?.value ?? "").split(',');
+      let url = '/api/quotes';
+      let method = 'POST';
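+      // new quotes are created with POST /api/quotes; existing quotes are updated with PUT /api/quotes/<id>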
+      if (params.id !== 'new') {
+        url += `/${params.id}`;
+        method = 'PUT';
+      }
+      const response = await fetch(url, {
+        method: method,
+        headers: {'Content-Type': 'application/json'},
+        body: JSON.stringify(quote),
+      });
+      if (response.status == 200 || response.status == 201) {
+        const data = await response.json();
+        navigate(`/quotes/${data.meta.id}`);
+      }
+      else {
+        setStatus('Save error');
+      }
+    }
+  };
+
+  const onDelete = async (ev: MouseEvent<HTMLElement>) => {
+    ev.preventDefault();
+    const response = await fetch(`/api/quotes/${params.id}`, {
+      method: 'DELETE',
+    });
+    if (response.status == 204) {
+      navigate('/');
+    }
+    else {
+      setStatus('Delete error');
+    }
+  };
+
+  return (
+    <Container fluid="md" className="App">
+      <h1>Elasticsearch + Pydantic Demo</h1>
+      <Form className="position-relative">
+        <ToastContainer position="top-end">
+          <Toast bg="primary" onClose={() => setStatus('')} show={status != ''} delay={3000} autohide>
+            <Toast.Body className="text-white">{status}</Toast.Body>
+          </Toast>
+        </ToastContainer>
+        <Form.Group className="mb-3" controlId="formQuote">
+          <Form.Label>Quote</Form.Label>
+          <Form.Control ref={quoteField} type="text" placeholder="Enter quote" defaultValue={quote?.quote} />
+        </Form.Group>
+        <Form.Group className="mb-3" controlId="formAuthor">
+          <Form.Label>Author</Form.Label>
+          <Form.Control ref={authorField} type="text" placeholder="Enter author" defaultValue={quote?.author} />
+        </Form.Group>
+        <Form.Group className="mb-3" controlId="formAuthor">
+          <Form.Label>Tags</Form.Label>
+          <Form.Control ref={tagsField} type="text" placeholder="Enter tags" defaultValue={quote?.tags} />
+        </Form.Group>
+        <Stack direction="horizontal" gap={2}>
+          <Button variant="primary" type="submit" onClick={onSubmit}>
+            {params.id === 'new' ? 'Save' : 'Update'}
+          </Button>
+          {params.id !== 'new' &&
+            <Button variant="danger" type="submit" onClick={onDelete}>
+              Delete
+            </Button>
+          }
+          <NavLink className="btn btn-secondary" to="/">Back</NavLink>        
+        </Stack>
+      </Form>
+    </Container>
+  );
+}
diff -pruN 8.17.2-2/examples/quotes/src/index.css 9.2.0-1/examples/quotes/src/index.css
--- 8.17.2-2/examples/quotes/src/index.css	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/src/index.css	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,55 @@
+body {
+  margin: 0;
+  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
+    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
+    sans-serif;
+  -webkit-font-smoothing: antialiased;
+  -moz-osx-font-smoothing: grayscale;
+}
+
+code {
+  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
+    monospace;
+}
+
+.SearchForm {
+  margin-bottom: 25px;
+}
+
+.Tags {
+  margin-top: 10px;
+}
+
+.Results {
+  margin-top: 10px;
+}
+
+.Results .Summary {
+  font-size: 0.9em;
+  font-style: italic;
+}
+
+.Filter {
+  font-style: italic;
+}
+
+.Tags button {
+  padding: 0px;
+}
+
+.ResultQuote {
+  font-weight: bold;
+}
+
+.ResultAuthor {
+}
+
+.ResultScore {
+  font-size: 0.8em;
+}
+
+.ResultTags {
+  font-size: 0.8em;
+  font-style: italic;
+}
+
diff -pruN 8.17.2-2/examples/quotes/src/main.tsx 9.2.0-1/examples/quotes/src/main.tsx
--- 8.17.2-2/examples/quotes/src/main.tsx	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/src/main.tsx	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,24 @@
+import { StrictMode } from 'react'
+import { createRoot } from 'react-dom/client'
+import { createBrowserRouter, RouterProvider } from 'react-router';
+import 'bootstrap/dist/css/bootstrap.min.css';
+import './index.css'
+import App from './App.tsx'
+import Quote from './Quote.tsx'
+
+const router = createBrowserRouter([
+  {
+    path: '/',
+    element: <App />,
+  },
+  {
+    path: '/quotes/:id',
+    element: <Quote />,
+  },
+]);
+
+createRoot(document.getElementById('root')!).render(
+  <StrictMode>
+    <RouterProvider router={router} />
+  </StrictMode>,
+)
diff -pruN 8.17.2-2/examples/quotes/src/models.tsx 9.2.0-1/examples/quotes/src/models.tsx
--- 8.17.2-2/examples/quotes/src/models.tsx	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/src/models.tsx	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+export interface Meta {
+  id: string;
+  score: number;
+}
+
+export interface Quote {
+  meta: Meta;
+  quote: string;
+  author: string;
+  tags: string[];
+};
+
+export interface Tag {
+  tag: string;
+  count: number;
+};
diff -pruN 8.17.2-2/examples/quotes/tsconfig.app.json 9.2.0-1/examples/quotes/tsconfig.app.json
--- 8.17.2-2/examples/quotes/tsconfig.app.json	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/tsconfig.app.json	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,28 @@
+{
+  "compilerOptions": {
+    "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
+    "target": "ES2022",
+    "useDefineForClassFields": true,
+    "lib": ["ES2022", "DOM", "DOM.Iterable"],
+    "module": "ESNext",
+    "types": ["vite/client"],
+    "skipLibCheck": true,
+
+    /* Bundler mode */
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "verbatimModuleSyntax": true,
+    "moduleDetection": "force",
+    "noEmit": true,
+    "jsx": "react-jsx",
+
+    /* Linting */
+    "strict": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "erasableSyntaxOnly": true,
+    "noFallthroughCasesInSwitch": true,
+    "noUncheckedSideEffectImports": true
+  },
+  "include": ["src"]
+}
diff -pruN 8.17.2-2/examples/quotes/tsconfig.json 9.2.0-1/examples/quotes/tsconfig.json
--- 8.17.2-2/examples/quotes/tsconfig.json	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/tsconfig.json	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,7 @@
+{
+  "files": [],
+  "references": [
+    { "path": "./tsconfig.app.json" },
+    { "path": "./tsconfig.node.json" }
+  ]
+}
diff -pruN 8.17.2-2/examples/quotes/tsconfig.node.json 9.2.0-1/examples/quotes/tsconfig.node.json
--- 8.17.2-2/examples/quotes/tsconfig.node.json	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/tsconfig.node.json	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,26 @@
+{
+  "compilerOptions": {
+    "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
+    "target": "ES2023",
+    "lib": ["ES2023"],
+    "module": "ESNext",
+    "types": [],
+    "skipLibCheck": true,
+
+    /* Bundler mode */
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "verbatimModuleSyntax": true,
+    "moduleDetection": "force",
+    "noEmit": true,
+
+    /* Linting */
+    "strict": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "erasableSyntaxOnly": true,
+    "noFallthroughCasesInSwitch": true,
+    "noUncheckedSideEffectImports": true
+  },
+  "include": ["vite.config.ts"]
+}
diff -pruN 8.17.2-2/examples/quotes/vite.config.ts 9.2.0-1/examples/quotes/vite.config.ts
--- 8.17.2-2/examples/quotes/vite.config.ts	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/examples/quotes/vite.config.ts	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,15 @@
+import { defineConfig } from 'vite'
+import react from '@vitejs/plugin-react'
+
+// https://vite.dev/config/
+export default defineConfig({
+  plugins: [react()],
+  server: {
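+    // forward API calls and FastAPI's docs routes to the backend dev server (uvicorn on port 5000)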
+    proxy: {
+      '/api': 'http://localhost:5000',
+      '/docs': 'http://localhost:5000',
+      '/redoc': 'http://localhost:5000',
+      '/openapi.json': 'http://localhost:5000',
+    },
+  },
+})
diff -pruN 8.17.2-2/noxfile.py 9.2.0-1/noxfile.py
--- 8.17.2-2/noxfile.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/noxfile.py	2025-10-28 16:50:53.000000000 +0000
@@ -37,7 +37,6 @@ def pytest_argv():
         "pytest",
         "--cov-report=term-missing",
         "--cov=elasticsearch",
-        "--cov-config=setup.cfg",
         f"--junitxml={junit_xml}",
         "--log-level=DEBUG",
         "--cache-clear",
@@ -45,14 +44,14 @@ def pytest_argv():
     ]
 
 
-@nox.session(python=["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"])
+@nox.session(python=["3.10", "3.11", "3.12", "3.13", "3.14"])
 def test(session):
-    session.install(".[dev]", env=INSTALL_ENV, silent=False)
+    session.install("-e", ".[dev]", env=INSTALL_ENV, silent=False)
 
     session.run(*pytest_argv(), *session.posargs)
 
 
-@nox.session(python=["3.8", "3.13"])
+@nox.session(python=["3.10", "3.14"])
 def test_otel(session):
     session.install(
         ".[dev]",
@@ -67,9 +66,13 @@ def test_otel(session):
 
 @nox.session()
 def format(session):
-    session.install("black~=25.0", "isort~=6.0", "flynt", "unasync>=0.6.0", "jinja2")
+    session.install(
+        ".", "black~=25.0", "isort~=6.0", "flynt", "unasync>=0.6.0", "jinja2"
+    )
 
     session.run("python", "utils/run-unasync.py")
+    session.run("python", "utils/run-unasync-dsl.py")
+    session.run("python", "utils/dsl-generator.py", env={"PYTHONPATH": "./"})
     session.run("isort", "--profile=black", *SOURCE_FILES)
     session.run("flynt", *SOURCE_FILES)
     session.run("black", *SOURCE_FILES)
@@ -84,11 +87,19 @@ def lint(session):
     session.run("python", "-c", "from elasticsearch._otel import OpenTelemetry")
 
     session.install(
-        "flake8", "black~=24.0", "mypy", "isort", "types-requests", "unasync>=0.6.0"
+        "flake8",
+        "black~=25.0",
+        "mypy",
+        "pydantic",
+        "isort~=6.0",
+        "types-requests",
+        "types-python-dateutil",
+        "unasync>=0.6.0",
     )
     session.run("isort", "--check", "--profile=black", *SOURCE_FILES)
     session.run("black", "--check", *SOURCE_FILES)
     session.run("python", "utils/run-unasync.py", "--check")
+    session.run("python", "utils/run-unasync-dsl.py", "--check")
     session.run("flake8", *SOURCE_FILES)
     session.run("python", "utils/license-headers.py", "check", *SOURCE_FILES)
 
@@ -104,6 +115,7 @@ def lint(session):
         "--enable-error-code=ignore-without-code",
         "elasticsearch/",
         "test_elasticsearch/test_types/",
+        "examples/dsl/",
     )
 
     # Make sure we don't require aiohttp to be installed for users to
diff -pruN 8.17.2-2/pyproject.toml 9.2.0-1/pyproject.toml
--- 8.17.2-2/pyproject.toml	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/pyproject.toml	2025-10-28 16:50:53.000000000 +0000
@@ -7,7 +7,7 @@ name = "elasticsearch"
 description = "Python client for Elasticsearch"
 readme = "README.md"
 license = "Apache-2.0"
-requires-python = ">=3.8"
+requires-python = ">=3.10"
 authors = [
     { name = "Elastic Client Library Maintainers", email = "client-libs@elastic.co" },
 ]
@@ -21,8 +21,6 @@ classifiers = [
     "Operating System :: OS Independent",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.8",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
@@ -42,7 +40,11 @@ keywords = [
 ]
 dynamic = ["version"]
 dependencies = [
-    "elastic-transport>=8.15.1,<9",
+    "elastic-transport>=9.2.0,<10",
+    "python-dateutil",
+    "typing-extensions",
+    "sniffio",
+    "anyio",
 ]
 
 [project.optional-dependencies]
@@ -55,14 +57,17 @@ vectorstore_mmr = ["numpy>=1", "simsimd>
 dev = [
     "requests>=2, <3",
     "aiohttp",
+    "httpx",
     "pytest",
     "pytest-cov",
+    "pytest-mock",
     "pytest-asyncio",
     "coverage",
     "jinja2",
     "python-dateutil",
     "unasync",
     "pyyaml>=5.4",
+    "pydantic",
     "isort",
     "black",
     "twine",
@@ -71,9 +76,16 @@ dev = [
     "orjson",
     "numpy",
     "simsimd",
-    "pyarrow",
+    "pyarrow; python_version<'3.14'",
     "pandas",
     "mapbox-vector-tile",
+    "jinja2",
+    "tqdm",
+    "trio",
+    "mypy",
+    "pyright",
+    "types-python-dateutil",
+    "types-tqdm",
 ]
 docs = [
     "sphinx",
@@ -89,7 +101,7 @@ Homepage = "https://github.com/elastic/e
 
 [tool.hatch.version]
 path = "elasticsearch/_version.py"
-pattern = "__versionstr__ = \"(?P<version>[^']+)\""
+pattern = "__versionstr__ = \"(?P<version>[^']+?)\""
 
 [tool.hatch.build.targets.sdist]
 include = [
@@ -109,7 +121,15 @@ packages = ["elasticsearch"]
 [tool.pytest.ini_options]
 junit_family = "legacy"
 xfail_strict = true
-markers = "otel"
+markers = [
+    "otel",
+    "sync: mark a test as performing I/O without asyncio.",
+]
+filterwarnings = [
+    "ignore:Legacy index templates are deprecated in favor of composable templates.:elasticsearch.exceptions.ElasticsearchWarning",
+    "ignore:datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version..*:DeprecationWarning",
+    "default:enable_cleanup_closed ignored.*:DeprecationWarning",
+]
 
 [tool.isort]
 profile = "black"
@@ -120,4 +140,5 @@ exclude_lines = [
 ]
 
 [tool.mypy]
+plugins = ["pydantic.mypy"]
 ignore_missing_imports = true
diff -pruN 8.17.2-2/setup.cfg 9.2.0-1/setup.cfg
--- 8.17.2-2/setup.cfg	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/setup.cfg	2025-10-28 16:50:53.000000000 +0000
@@ -1,2 +1,2 @@
 [flake8]
-ignore = E203, E266, E501, W503
+ignore = E203, E266, E501, W503, E704, E741
diff -pruN 8.17.2-2/test_elasticsearch/test_async/test_server/conftest.py 9.2.0-1/test_elasticsearch/test_async/test_server/conftest.py
--- 8.17.2-2/test_elasticsearch/test_async/test_server/conftest.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_async/test_server/conftest.py	2025-10-28 16:50:53.000000000 +0000
@@ -16,27 +16,26 @@
 #  under the License.
 
 import pytest
-import pytest_asyncio
+import sniffio
 
 import elasticsearch
 
 from ...utils import CA_CERTS, wipe_cluster
 
-pytestmark = pytest.mark.asyncio
 
-
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 async def async_client_factory(elasticsearch_url):
-
-    if not hasattr(elasticsearch, "AsyncElasticsearch"):
-        pytest.skip("test requires 'AsyncElasticsearch' and aiohttp to be installed")
-
+    kwargs = {}
+    if sniffio.current_async_library() == "trio":
+        kwargs["node_class"] = "httpxasync"
     # Unfortunately the asyncio client needs to be rebuilt every
     # test execution due to how pytest-asyncio manages
     # event loops (one per test!)
     client = None
     try:
-        client = elasticsearch.AsyncElasticsearch(elasticsearch_url, ca_certs=CA_CERTS)
+        client = elasticsearch.AsyncElasticsearch(
+            elasticsearch_url, ca_certs=CA_CERTS, **kwargs
+        )
         yield client
     finally:
         if client:
diff -pruN 8.17.2-2/test_elasticsearch/test_async/test_server/test_clients.py 9.2.0-1/test_elasticsearch/test_async/test_server/test_clients.py
--- 8.17.2-2/test_elasticsearch/test_async/test_server/test_clients.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_async/test_server/test_clients.py	2025-10-28 16:50:53.000000000 +0000
@@ -18,7 +18,7 @@
 
 import pytest
 
-pytestmark = pytest.mark.asyncio
+pytestmark = pytest.mark.anyio
 
 
 @pytest.mark.parametrize("kwargs", [{"body": {"text": "привет"}}, {"text": "привет"}])
diff -pruN 8.17.2-2/test_elasticsearch/test_async/test_server/test_helpers.py 9.2.0-1/test_elasticsearch/test_async/test_server/test_helpers.py
--- 8.17.2-2/test_elasticsearch/test_async/test_server/test_helpers.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_async/test_server/test_helpers.py	2025-10-28 16:50:53.000000000 +0000
@@ -15,20 +15,20 @@
 #  specific language governing permissions and limitations
 #  under the License.
 
-import asyncio
 import logging
+import time
 from datetime import datetime, timedelta, timezone
 from unittest.mock import MagicMock, call, patch
 
+import anyio
 import pytest
-import pytest_asyncio
 from elastic_transport import ApiResponseMeta, ObjectApiResponse
 
 from elasticsearch import helpers
 from elasticsearch.exceptions import ApiError
 from elasticsearch.helpers import ScanError
 
-pytestmark = [pytest.mark.asyncio]
+pytestmark = pytest.mark.anyio
 
 
 class AsyncMock(MagicMock):
@@ -92,7 +92,7 @@ class TestStreamingBulk:
     async def test_documents_data_types(self, async_client):
         async def async_gen():
             for x in range(100):
-                await asyncio.sleep(0)
+                await anyio.sleep(0)
                 yield {"answer": x, "_id": x}
 
         def sync_gen():
@@ -123,6 +123,45 @@ class TestStreamingBulk:
             "_source"
         ]
 
+    async def test_explicit_flushes(self, async_client):
+        async def async_gen():
+            yield {"answer": 2, "_id": 0}
+            yield {"answer": 1, "_id": 1}
+            yield helpers.BULK_FLUSH
+            await anyio.sleep(0.5)
+            yield {"answer": 2, "_id": 2}
+
+        timestamps = []
+        async for ok, item in helpers.async_streaming_bulk(
+            async_client, async_gen(), index="test-index", refresh=True
+        ):
+            timestamps.append(time.time())
+            assert ok
+
+        # make sure there is a pause between the writing of the 2nd and 3rd items
+        assert timestamps[2] - timestamps[1] > (timestamps[1] - timestamps[0]) * 2
+
+    async def test_timeout_flushes(self, async_client):
+        async def async_gen():
+            yield {"answer": 2, "_id": 0}
+            yield {"answer": 1, "_id": 1}
+            await anyio.sleep(0.5)
+            yield {"answer": 2, "_id": 2}
+
+        timestamps = []
+        async for ok, item in helpers.async_streaming_bulk(
+            async_client,
+            async_gen(),
+            index="test-index",
+            refresh=True,
+            flush_after_seconds=0.05,
+        ):
+            assert ok
+            timestamps.append(time.time())
+
+        # make sure there is a pause between the writing of the 2nd and 3rd items
+        assert timestamps[2] - timestamps[1] > (timestamps[1] - timestamps[0]) * 2
+
     async def test_all_errors_from_chunk_are_raised_on_failure(self, async_client):
         await async_client.indices.create(
             index="i",
@@ -491,7 +530,7 @@ class MockResponse:
         return self().__await__()
 
 
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 async def scan_teardown(async_client):
     yield
     await async_client.clear_scroll(scroll_id="_all")
@@ -542,9 +581,10 @@ class TestScan:
             bulk.append({"value": x})
         await async_client.bulk(operations=bulk, refresh=True)
 
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(async_client, "scroll", MockScroll()):
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(async_client, "scroll", MockScroll()),
+        ):
             data = [
                 x
                 async for x in helpers.async_scan(
@@ -558,9 +598,10 @@ class TestScan:
             assert len(data) == 3
             assert data[-1] == {"scroll_data": 42}
 
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(async_client, "scroll", MockScroll()):
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(async_client, "scroll", MockScroll()),
+        ):
             with pytest.raises(ScanError):
                 data = [
                     x
@@ -576,9 +617,10 @@ class TestScan:
             assert data[-1] == {"scroll_data": 42}
 
     async def test_initial_search_error(self, async_client, scan_teardown):
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(async_client, "clear_scroll", new_callable=AsyncMock):
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(async_client, "clear_scroll", new_callable=AsyncMock),
+        ):
             with patch.object(
                 async_client,
                 "search",
@@ -634,15 +676,16 @@ class TestScan:
                         assert mock_scroll.calls == []
 
     async def test_no_scroll_id_fast_route(self, async_client, scan_teardown):
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(async_client, "scroll") as scroll_mock, patch.object(
-            async_client,
-            "search",
-            MockResponse(ObjectApiResponse(body={"no": "_scroll_id"}, meta=None)),
-        ), patch.object(
-            async_client, "clear_scroll"
-        ) as clear_mock:
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(async_client, "scroll") as scroll_mock,
+            patch.object(
+                async_client,
+                "search",
+                MockResponse(ObjectApiResponse(body={"no": "_scroll_id"}, meta=None)),
+            ),
+            patch.object(async_client, "clear_scroll") as clear_mock,
+        ):
             data = [
                 x async for x in helpers.async_scan(async_client, index="test_index")
             ]
@@ -661,9 +704,10 @@ class TestScan:
             bulk.append({"value": x})
         await async_client.bulk(operations=bulk, refresh=True)
 
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(async_client, "scroll", MockScroll()):
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(async_client, "scroll", MockScroll()),
+        ):
             _ = [
                 x
                 async for x in helpers.async_scan(
@@ -680,9 +724,10 @@ class TestScan:
         ]
 
         caplog.clear()
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(async_client, "scroll", MockScroll()):
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(async_client, "scroll", MockScroll()),
+        ):
             with pytest.raises(ScanError):
                 _ = [
                     x
@@ -706,11 +751,12 @@ class TestScan:
             bulk.append({"value": x})
         await async_client.bulk(operations=bulk, refresh=True)
 
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ), patch.object(
-            async_client, "clear_scroll", wraps=async_client.clear_scroll
-        ) as spy:
+        with (
+            patch.object(async_client, "options", return_value=async_client),
+            patch.object(
+                async_client, "clear_scroll", wraps=async_client.clear_scroll
+            ) as spy,
+        ):
             _ = [
                 x
                 async for x in helpers.async_scan(
@@ -748,20 +794,21 @@ class TestScan:
     async def test_scan_auth_kwargs_forwarded(
         self, async_client, scan_teardown, kwargs
     ):
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ) as options, patch.object(
-            async_client,
-            "search",
-            return_value=MockResponse(
-                ObjectApiResponse(
-                    body={
-                        "_scroll_id": "scroll_id",
-                        "_shards": {"successful": 5, "total": 5, "skipped": 0},
-                        "hits": {"hits": [{"search_data": 1}]},
-                    },
-                    meta=None,
-                )
+        with (
+            patch.object(async_client, "options", return_value=async_client) as options,
+            patch.object(
+                async_client,
+                "search",
+                return_value=MockResponse(
+                    ObjectApiResponse(
+                        body={
+                            "_scroll_id": "scroll_id",
+                            "_shards": {"successful": 5, "total": 5, "skipped": 0},
+                            "hits": {"hits": [{"search_data": 1}]},
+                        },
+                        meta=None,
+                    )
+                ),
             ),
         ):
             with patch.object(
@@ -801,20 +848,21 @@ class TestScan:
     async def test_scan_auth_kwargs_favor_scroll_kwargs_option(
         self, async_client, scan_teardown
     ):
-        with patch.object(
-            async_client, "options", return_value=async_client
-        ) as options, patch.object(
-            async_client,
-            "search",
-            return_value=MockResponse(
-                ObjectApiResponse(
-                    body={
-                        "_scroll_id": "scroll_id",
-                        "_shards": {"successful": 5, "total": 5, "skipped": 0},
-                        "hits": {"hits": [{"search_data": 1}]},
-                    },
-                    meta=None,
-                )
+        with (
+            patch.object(async_client, "options", return_value=async_client) as options,
+            patch.object(
+                async_client,
+                "search",
+                return_value=MockResponse(
+                    ObjectApiResponse(
+                        body={
+                            "_scroll_id": "scroll_id",
+                            "_shards": {"successful": 5, "total": 5, "skipped": 0},
+                            "hits": {"hits": [{"search_data": 1}]},
+                        },
+                        meta=None,
+                    )
+                ),
             ),
         ):
             with patch.object(
@@ -878,21 +926,23 @@ class TestScan:
     ],
 )
 async def test_scan_from_keyword_is_aliased(async_client, scan_kwargs):
-    with patch.object(async_client, "options", return_value=async_client), patch.object(
-        async_client,
-        "search",
-        return_value=MockResponse(
-            ObjectApiResponse(
-                body={
-                    "_scroll_id": "dummy_id",
-                    "_shards": {"successful": 5, "total": 5},
-                    "hits": {"hits": []},
-                },
-                meta=None,
-            )
-        ),
-    ) as search_mock, patch.object(
-        async_client, "clear_scroll", return_value=MockResponse(None)
+    with (
+        patch.object(async_client, "options", return_value=async_client),
+        patch.object(
+            async_client,
+            "search",
+            return_value=MockResponse(
+                ObjectApiResponse(
+                    body={
+                        "_scroll_id": "dummy_id",
+                        "_shards": {"successful": 5, "total": 5},
+                        "hits": {"hits": []},
+                    },
+                    meta=None,
+                )
+            ),
+        ) as search_mock,
+        patch.object(async_client, "clear_scroll", return_value=MockResponse(None)),
     ):
         [
             x
@@ -904,7 +954,7 @@ async def test_scan_from_keyword_is_alia
         assert "from" not in search_mock.call_args[1]
 
 
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 async def reindex_setup(async_client):
     bulk = []
     for x in range(100):
@@ -982,7 +1032,7 @@ class TestReindex:
         )["_source"]
 
 
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 async def parent_reindex_setup(async_client):
     body = {
         "settings": {"number_of_shards": 1, "number_of_replicas": 0},
@@ -1043,7 +1093,7 @@ class TestParentChildReindex:
         } == q
 
 
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 async def reindex_data_stream_setup(async_client):
     dt = datetime.now(tz=timezone.utc)
     bulk = []
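
The hunks above fold the chained `patch.object(...) as ..., patch.object(...)` context managers into the parenthesized `with (...)` form and swap `pytest_asyncio.fixture` for a plain `pytest.fixture`. A minimal sketch of the same two patterns, assuming the anyio pytest plugin is installed; `FakeClient`, `fake_client` and `test_grouped_patches` are invented names for illustration only:

from unittest.mock import patch

import pytest

pytestmark = pytest.mark.anyio  # drive coroutine tests through the anyio plugin


class FakeClient:
    """Stand-in client used only for this sketch."""

    def options(self):
        return self

    async def search(self):
        return {"hits": {"hits": []}}


@pytest.fixture
async def fake_client():
    # Under the anyio plugin an async fixture can use the plain pytest.fixture
    # decorator; a separate pytest_asyncio.fixture decorator is not needed.
    yield FakeClient()


async def test_grouped_patches(fake_client):
    # Python 3.10+ allows grouping several context managers in parentheses,
    # which is the form the rewritten `with (...)` blocks above switch to.
    with (
        patch.object(fake_client, "options", return_value=fake_client) as options,
        patch.object(fake_client, "search", return_value={"hits": {"hits": []}}),
    ):
        assert fake_client.options() is fake_client
        assert options.called
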
diff -pruN 8.17.2-2/test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py 9.2.0-1/test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py
--- 8.17.2-2/test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py	2025-10-28 16:50:53.000000000 +0000
@@ -16,14 +16,13 @@
 #  under the License.
 
 import pytest
-import pytest_asyncio
 
 from elasticsearch import RequestError
 
-pytestmark = pytest.mark.asyncio
+pytestmark = pytest.mark.anyio
 
 
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 async def mvt_setup(async_client):
     await async_client.indices.create(
         index="museums",
diff -pruN 8.17.2-2/test_elasticsearch/test_async/test_server/test_rest_api_spec.py 9.2.0-1/test_elasticsearch/test_async/test_server/test_rest_api_spec.py
--- 8.17.2-2/test_elasticsearch/test_async/test_server/test_rest_api_spec.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_async/test_server/test_rest_api_spec.py	2025-10-28 16:50:53.000000000 +0000
@@ -25,7 +25,6 @@ import json
 import warnings
 
 import pytest
-import pytest_asyncio
 
 from elasticsearch import ElasticsearchWarning, RequestError
 
@@ -39,6 +38,8 @@ from ...test_server.test_rest_api_spec i
 )
 from ...utils import parse_version
 
+# We're not using `pytest.mark.anyio` here because it would run the test suite twice,
+# which does not work because the suite does not fully clean up after itself.
 pytestmark = pytest.mark.asyncio
 
 XPACK_FEATURES = None
@@ -240,7 +241,7 @@ class AsyncYamlRunner(YamlRunner):
         return name in XPACK_FEATURES
 
 
-@pytest_asyncio.fixture(scope="function")
+@pytest.fixture(scope="function")
 def async_runner(async_client_factory):
     return AsyncYamlRunner(async_client_factory)
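
The comment kept above alludes to how the anyio pytest plugin selects its backend: every `pytest.mark.anyio` test runs once for each parameter of the `anyio_backend` fixture, so a conftest that parametrizes that fixture with two backends executes the whole suite twice. A small sketch of that mechanism; the two-backend parametrization here is an assumption for illustration, not a claim about this repository's conftest:

# conftest.py (illustrative only)
import pytest


@pytest.fixture(params=["asyncio", "trio"])
def anyio_backend(request):
    # Every pytest.mark.anyio test collected under this conftest runs once
    # per parameter, i.e. twice with this parametrization.
    return request.param


# Pinning the fixture to one backend restores a single run:
# @pytest.fixture
# def anyio_backend():
#     return "asyncio"
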
 
diff -pruN 8.17.2-2/test_elasticsearch/test_async/test_transport.py 9.2.0-1/test_elasticsearch/test_async/test_transport.py
--- 8.17.2-2/test_elasticsearch/test_async/test_transport.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_async/test_transport.py	2025-10-28 16:50:53.000000000 +0000
@@ -16,11 +16,12 @@
 #  under the License.
 
 
-import asyncio
 import re
+import time
 import warnings
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Optional
 
+import anyio
 import pytest
 from elastic_transport import (
     ApiResponseMeta,
@@ -40,8 +41,6 @@ from elasticsearch.exceptions import (
     UnsupportedProductError,
 )
 
-pytestmark = pytest.mark.asyncio
-
 
 class DummyNode(BaseAsyncNode):
     def __init__(self, config: NodeConfig):
@@ -175,6 +174,7 @@ CLUSTER_NODES_MASTER_ONLY = """{
 }"""
 
 
+@pytest.mark.anyio
 class TestTransport:
     async def test_request_timeout_extracted_from_params_and_passed(self):
         client = AsyncElasticsearch(
@@ -256,7 +256,7 @@ class TestTransport:
         calls = client.transport.node_pool.get().calls
         assert 1 == len(calls)
         assert calls[0][1]["headers"] == {
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
     async def test_body_surrogates_replaced_encoded_into_bytes(self):
@@ -378,6 +378,9 @@ class TestTransport:
         assert len(client.transport.node_pool._alive_nodes) == 2
         assert len(client.transport.node_pool._dead_consecutive_failures) == 0
 
+
+@pytest.mark.asyncio
+class TestSniffing:
     @pytest.mark.parametrize(
         ["nodes_info_response", "node_host"],
         [(CLUSTER_NODES, "1.1.1.1"), (CLUSTER_NODES_7x_PUBLISH_HOST, "somehost.tld")],
@@ -426,7 +429,7 @@ class TestTransport:
             {
                 "body": None,
                 "headers": {
-                    "accept": "application/vnd.elasticsearch+json; compatible-with=8"
+                    "accept": "application/vnd.elasticsearch+json; compatible-with=9"
                 },
                 "request_timeout": None,  # <-- Should be None instead of 12
             },
@@ -454,7 +457,7 @@ class TestTransport:
             {
                 "body": None,
                 "headers": {
-                    "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                    "accept": "application/vnd.elasticsearch+json; compatible-with=9",
                 },
                 "request_timeout": DEFAULT,
             },
@@ -464,7 +467,7 @@ class TestTransport:
             {
                 "body": None,
                 "headers": {
-                    "accept": "application/vnd.elasticsearch+json; compatible-with=8"
+                    "accept": "application/vnd.elasticsearch+json; compatible-with=9"
                 },
                 "request_timeout": 12,
             },
@@ -527,23 +530,23 @@ class TestTransport:
         assert request_failed_in_error
         assert len(client.transport.node_pool) == 3
 
-    async def test_sniff_after_n_seconds(self, event_loop):
+    async def test_sniff_after_n_seconds(self):
         client = AsyncElasticsearch(  # noqa: F821
             [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})],
             node_class=DummyNode,
             min_delay_between_sniffing=5,
         )
-        client.transport._last_sniffed_at = event_loop.time()
+        client.transport._last_sniffed_at = time.monotonic()
 
         await client.info()
 
         for _ in range(4):
             await client.info()
-            await asyncio.sleep(0)
+            await anyio.sleep(0)
 
         assert 1 == len(client.transport.node_pool)
 
-        client.transport._last_sniffed_at = event_loop.time() - 5.1
+        client.transport._last_sniffed_at = time.monotonic() - 5.1
 
         await client.info()
         await client.transport._sniffing_task  # Need to wait for the sniffing task to complete
@@ -553,19 +556,17 @@ class TestTransport:
             node.base_url for node in client.transport.node_pool.all()
         )
         assert (
-            event_loop.time() - 1
+            time.monotonic() - 1
             < client.transport._last_sniffed_at
-            < event_loop.time() + 0.01
+            < time.monotonic() + 0.01
         )
 
     @pytest.mark.parametrize(
         "kwargs",
         [
             {"sniff_on_start": True},
-            {"sniff_on_connection_fail": True},
             {"sniff_on_node_failure": True},
             {"sniff_before_requests": True},
-            {"sniffer_timeout": 1},
             {"sniff_timeout": 1},
         ],
     )
@@ -581,7 +582,7 @@ class TestTransport:
             == "Sniffing should not be enabled when connecting to Elastic Cloud"
         )
 
-    async def test_sniff_on_start_close_unlocks_async_calls(self, event_loop):
+    async def test_sniff_on_start_close_unlocks_async_calls(self):
         client = AsyncElasticsearch(  # noqa: F821
             [
                 NodeConfig(
@@ -596,20 +597,17 @@ class TestTransport:
         )
 
         # Start making _async_calls() before we cancel
-        tasks = []
-        start_time = event_loop.time()
-        for _ in range(3):
-            tasks.append(event_loop.create_task(client.info()))
-            await asyncio.sleep(0)
-
-        # Close the transport while the sniffing task is active! :(
-        await client.transport.close()
-
-        # Now we start waiting on all those _async_calls()
-        await asyncio.gather(*tasks)
-        end_time = event_loop.time()
-        duration = end_time - start_time
+        async with anyio.create_task_group() as tg:
+            start_time = time.monotonic()
+            for _ in range(3):
+                tg.start_soon(client.info)
+            await anyio.sleep(0)
 
+            # Close the transport while the sniffing task is active! :(
+            await client.transport.close()
+
+        end_time = time.monotonic()
+        duration = end_time - start_time
         # A lot quicker than 10 seconds defined in 'delay'
         assert duration < 1
 
@@ -660,43 +658,8 @@ class TestTransport:
         ports = {node.config.port for node in client.transport.node_pool.all()}
         assert ports == {9200, 124}
 
-    async def test_sniffing_deprecated_host_info_callback(self):
-        def host_info_callback(
-            node_info: Dict[str, Any], host: Dict[str, Union[int, str]]
-        ) -> Dict[str, Any]:
-            return (
-                host if node_info["http"]["publish_address"].endswith(":124") else None
-            )
-
-        with warnings.catch_warnings(record=True) as w:
-            client = AsyncElasticsearch(  # noqa: F821
-                [
-                    NodeConfig(
-                        "http",
-                        "localhost",
-                        9200,
-                        _extras={"data": CLUSTER_NODES_MASTER_ONLY},
-                    )
-                ],
-                node_class=DummyNode,
-                sniff_on_start=True,
-                host_info_callback=host_info_callback,
-            )
-            await client.transport._async_call()
-
-        assert len(w) == 1
-        assert w[0].category == DeprecationWarning
-        assert (
-            str(w[0].message)
-            == "The 'host_info_callback' parameter is deprecated in favor of 'sniffed_node_callback'"
-        )
-
-        assert len(client.transport.node_pool) == 2
-
-        ports = {node.config.port for node in client.transport.node_pool.all()}
-        assert ports == {9200, 124}
-
 
+@pytest.mark.anyio
 @pytest.mark.parametrize("headers", [{}, {"X-elastic-product": "BAD HEADER"}])
 async def test_unsupported_product_error(headers):
     client = AsyncElasticsearch(
@@ -719,13 +682,14 @@ async def test_unsupported_product_error
         {
             "body": None,
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "request_timeout": DEFAULT,
         },
     )
 
 
+@pytest.mark.anyio
 @pytest.mark.parametrize("status", [401, 403, 413, 500])
 async def test_unsupported_product_error_not_raised_on_non_2xx(status):
     client = AsyncElasticsearch(
@@ -745,6 +709,7 @@ async def test_unsupported_product_error
         assert e.meta.status == status
 
 
+@pytest.mark.anyio
 @pytest.mark.parametrize("status", [404, 500])
 async def test_api_error_raised_before_product_error(status):
     client = AsyncElasticsearch(
@@ -773,6 +738,7 @@ async def test_api_error_raised_before_p
     assert calls[0][0] == ("GET", "/")
 
 
+@pytest.mark.anyio
 @pytest.mark.parametrize(
     "headers",
     [
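
The sniffing tests above replace `event_loop.create_task()` plus `asyncio.gather()` with an anyio task group, and `event_loop.time()` with `time.monotonic()`. A stripped-down sketch of that structured-concurrency pattern; the `fetch` coroutine stands in for the real `client.info()` calls and is invented for the example:

import time

import anyio


async def fetch() -> None:
    await anyio.sleep(0)  # placeholder for an awaited client call


async def run_concurrently() -> float:
    start = time.monotonic()
    async with anyio.create_task_group() as tg:
        for _ in range(3):
            tg.start_soon(fetch)  # start_soon replaces event_loop.create_task
        # Leaving the `async with` block waits for every started task,
        # which replaces the explicit asyncio.gather(*tasks) call.
    return time.monotonic() - start

Outside of pytest, `anyio.run(run_concurrently)` would drive the coroutine on the default asyncio backend.
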
diff -pruN 8.17.2-2/test_elasticsearch/test_client/test_deprecated_options.py 9.2.0-1/test_elasticsearch/test_client/test_deprecated_options.py
--- 8.17.2-2/test_elasticsearch/test_client/test_deprecated_options.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_client/test_deprecated_options.py	2025-10-28 16:50:53.000000000 +0000
@@ -19,85 +19,7 @@ import warnings
 
 import pytest
 
-from elasticsearch import Elasticsearch, JsonSerializer
-
-EXPECTED_SERIALIZERS = {
-    "application/vnd.mapbox-vector-tile",
-    "application/x-ndjson",
-    "application/json",
-    "text/*",
-    "application/vnd.elasticsearch+json",
-    "application/vnd.elasticsearch+x-ndjson",
-}
-
-
-try:
-    import pyarrow as pa
-
-    EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream")
-except ImportError:
-    pa = None
-
-
-def test_sniff_on_connection_fail():
-    with warnings.catch_warnings(record=True) as w:
-        client = Elasticsearch("http://localhost:9200", sniff_on_connection_fail=True)
-    assert client.transport._sniff_on_node_failure is True
-    assert len(w) == 1
-    assert w[0].category == DeprecationWarning
-    assert str(w[0].message) == (
-        "The 'sniff_on_connection_fail' parameter is deprecated in favor of 'sniff_on_node_failure'"
-    )
-
-    with pytest.raises(ValueError) as e:
-        Elasticsearch(
-            "http://localhost:9200",
-            sniff_on_connection_fail=True,
-            sniff_on_node_failure=True,
-        )
-    assert (
-        str(e.value)
-        == "Can't specify both 'sniff_on_connection_fail' and 'sniff_on_node_failure', instead only specify 'sniff_on_node_failure'"
-    )
-
-
-def test_sniffer_timeout():
-    with warnings.catch_warnings(record=True) as w:
-        client = Elasticsearch("http://localhost:9200", sniffer_timeout=1)
-    assert client.transport._min_delay_between_sniffing == 1
-    assert len(w) == 1
-    assert w[0].category == DeprecationWarning
-    assert str(w[0].message) == (
-        "The 'sniffer_timeout' parameter is deprecated in favor of 'min_delay_between_sniffing'"
-    )
-
-    with pytest.raises(ValueError) as e:
-        Elasticsearch(
-            "http://localhost:9200", sniffer_timeout=1, min_delay_between_sniffing=1
-        )
-    assert (
-        str(e.value)
-        == "Can't specify both 'sniffer_timeout' and 'min_delay_between_sniffing', instead only specify 'min_delay_between_sniffing'"
-    )
-
-
-def test_randomize_hosts():
-    with warnings.catch_warnings(record=True) as w:
-        Elasticsearch("http://localhost:9200", randomize_hosts=True)
-    assert len(w) == 1
-    assert w[0].category == DeprecationWarning
-    assert str(w[0].message) == (
-        "The 'randomize_hosts' parameter is deprecated in favor of 'randomize_nodes_in_pool'"
-    )
-
-    with pytest.raises(ValueError) as e:
-        Elasticsearch(
-            "http://localhost:9200", randomize_hosts=True, randomize_nodes_in_pool=True
-        )
-    assert (
-        str(e.value)
-        == "Can't specify both 'randomize_hosts' and 'randomize_nodes_in_pool', instead only specify 'randomize_nodes_in_pool'"
-    )
+from elasticsearch import Elasticsearch
 
 
 def test_http_auth():
@@ -124,40 +46,3 @@ def test_http_auth():
         str(e.value)
         == "Can't specify both 'http_auth' and 'basic_auth', instead only specify 'basic_auth'"
     )
-
-
-def test_serializer_and_serializers():
-    with pytest.raises(ValueError) as e:
-        Elasticsearch(
-            "http://localhost:9200",
-            serializer=JsonSerializer(),
-            serializers={"application/json": JsonSerializer()},
-        )
-    assert str(e.value) == (
-        "Can't specify both 'serializer' and 'serializers' parameters together. "
-        "Instead only specify one of the other."
-    )
-
-    class CustomSerializer(JsonSerializer):
-        pass
-
-    client = Elasticsearch("http://localhost:9200", serializer=CustomSerializer())
-    assert isinstance(
-        client.transport.serializers.get_serializer("application/json"),
-        CustomSerializer,
-    )
-    assert set(client.transport.serializers.serializers.keys()) == EXPECTED_SERIALIZERS
-
-    client = Elasticsearch(
-        "http://localhost:9200",
-        serializers={
-            "application/json": CustomSerializer(),
-            "application/cbor": CustomSerializer(),
-        },
-    )
-    assert isinstance(
-        client.transport.serializers.get_serializer("application/json"),
-        CustomSerializer,
-    )
-    expected = EXPECTED_SERIALIZERS | {"application/cbor"}
-    assert set(client.transport.serializers.serializers.keys()) == expected
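
The deleted tests above exercised the 8.x deprecation shims; their removal, together with the trimmed sniffing parametrization in test_transport.py, suggests the old keyword aliases are gone and only the replacement names cited in the deleted warning messages remain. A hedged migration sketch using those replacement parameters:

from elasticsearch import Elasticsearch

# The keyword arguments below are the replacements named by the removed
# deprecation warnings; the old aliases in the comments are assumed removed.
client = Elasticsearch(
    "http://localhost:9200",
    sniff_on_node_failure=True,      # was: sniff_on_connection_fail=True
    min_delay_between_sniffing=1,    # was: sniffer_timeout=1
    randomize_nodes_in_pool=True,    # was: randomize_hosts=True
)
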
diff -pruN 8.17.2-2/test_elasticsearch/test_client/test_options.py 9.2.0-1/test_elasticsearch/test_client/test_options.py
--- 8.17.2-2/test_elasticsearch/test_client/test_options.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_client/test_options.py	2025-10-28 16:50:53.000000000 +0000
@@ -19,7 +19,7 @@ import pytest
 from elastic_transport import OpenTelemetrySpan
 from elastic_transport.client_utils import DEFAULT
 
-from elasticsearch import AsyncElasticsearch, Elasticsearch
+from elasticsearch import AsyncElasticsearch, Elasticsearch, JsonSerializer
 from elasticsearch._sync.client.utils import USER_AGENT
 from test_elasticsearch.test_cases import (
     DummyAsyncTransport,
@@ -27,6 +27,22 @@ from test_elasticsearch.test_cases impor
     DummyTransportTestCase,
 )
 
+EXPECTED_SERIALIZERS = {
+    "application/vnd.mapbox-vector-tile",
+    "application/x-ndjson",
+    "application/json",
+    "text/*",
+    "application/vnd.elasticsearch+json",
+    "application/vnd.elasticsearch+x-ndjson",
+}
+
+try:
+    import pyarrow as pa
+
+    EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream")
+except ImportError:
+    pa = None
+
 
 class TestOptions(DummyTransportTestCase):
     def assert_called_with_headers(self, client, method, target, headers):
@@ -140,7 +156,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
         }
@@ -159,7 +175,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 1,
@@ -185,7 +201,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 1,
@@ -213,7 +229,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
         }
@@ -232,7 +248,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 1,
@@ -258,7 +274,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 1,
@@ -300,7 +316,7 @@ class TestOptions(DummyTransportTestCase
 
         assert call["headers"] == {
             "key": "val",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         client.options(headers={"key1": "val"}).indices.get(index="2")
@@ -309,7 +325,7 @@ class TestOptions(DummyTransportTestCase
         assert call["headers"] == {
             "key": "val",
             "key1": "val",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         client.options(headers={"key": "val2"}).indices.get(index="3")
@@ -317,7 +333,7 @@ class TestOptions(DummyTransportTestCase
 
         assert call["headers"] == {
             "key": "val2",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         client = Elasticsearch(
@@ -344,14 +360,14 @@ class TestOptions(DummyTransportTestCase
         call = calls[("GET", "/1")][0]
         assert call["headers"] == {
             "user-agent": "custom1",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         client.indices.get(index="2", headers={"user-agent": "custom2"})
         call = calls[("GET", "/2")][0]
         assert call["headers"] == {
             "user-agent": "custom2",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         client = Elasticsearch(
@@ -365,14 +381,14 @@ class TestOptions(DummyTransportTestCase
         call = calls[("GET", "/1")][0]
         assert call["headers"] == {
             "user-agent": "custom3",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         client.indices.get(index="2", headers={"user-agent": "custom4"})
         call = calls[("GET", "/2")][0]
         assert call["headers"] == {
             "user-agent": "custom4",
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
     def test_options_timeout_parameters(self):
@@ -394,7 +410,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 1,
@@ -424,7 +440,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 2,
@@ -449,7 +465,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
         }
@@ -471,7 +487,7 @@ class TestOptions(DummyTransportTestCase
         assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
         assert call == {
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "body": None,
             "request_timeout": 1,
@@ -479,3 +495,41 @@ class TestOptions(DummyTransportTestCase
             "retry_on_status": (404,),
             "retry_on_timeout": True,
         }
+
+    def test_serializer_and_serializers(self):
+        with pytest.raises(ValueError) as e:
+            Elasticsearch(
+                "http://localhost:9200",
+                serializer=JsonSerializer(),
+                serializers={"application/json": JsonSerializer()},
+            )
+        assert str(e.value) == (
+            "Can't specify both 'serializer' and 'serializers' parameters together. "
+            "Instead only specify one of the other."
+        )
+
+        class CustomSerializer(JsonSerializer):
+            pass
+
+        client = Elasticsearch("http://localhost:9200", serializer=CustomSerializer())
+        assert isinstance(
+            client.transport.serializers.get_serializer("application/json"),
+            CustomSerializer,
+        )
+        assert (
+            set(client.transport.serializers.serializers.keys()) == EXPECTED_SERIALIZERS
+        )
+
+        client = Elasticsearch(
+            "http://localhost:9200",
+            serializers={
+                "application/json": CustomSerializer(),
+                "application/cbor": CustomSerializer(),
+            },
+        )
+        assert isinstance(
+            client.transport.serializers.get_serializer("application/json"),
+            CustomSerializer,
+        )
+        expected = EXPECTED_SERIALIZERS | {"application/cbor"}
+        assert set(client.transport.serializers.serializers.keys()) == expected
diff -pruN 8.17.2-2/test_elasticsearch/test_client/test_serializers.py 9.2.0-1/test_elasticsearch/test_client/test_serializers.py
--- 8.17.2-2/test_elasticsearch/test_client/test_serializers.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_client/test_serializers.py	2025-10-28 16:50:53.000000000 +0000
@@ -46,22 +46,22 @@ class TestSerializers(DummyTransportTest
         self.client.get(index="test0", id="1")
         assert len(calls) == 1
         assert calls[("GET", "/test0/_doc/1")][0]["headers"] == {
-            "Accept": "application/vnd.elasticsearch+json; compatible-with=8"
+            "Accept": "application/vnd.elasticsearch+json; compatible-with=9"
         }
 
         # Search with body
         self.client.search(index="test1", query={"match_all": {}})
         assert len(calls) == 2
         assert calls[("POST", "/test1/_search")][0]["headers"] == {
-            "Accept": "application/vnd.elasticsearch+json; compatible-with=8",
-            "Content-Type": "application/vnd.elasticsearch+json; compatible-with=8",
+            "Accept": "application/vnd.elasticsearch+json; compatible-with=9",
+            "Content-Type": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         # Search without body
         self.client.search(index="test2")
         assert len(calls) == 3
         assert calls[("POST", "/test2/_search")][0]["headers"] == {
-            "Accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "Accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         # Multiple mimetypes in Accept
@@ -69,15 +69,15 @@ class TestSerializers(DummyTransportTest
         assert len(calls) == 4
         assert calls[("GET", "/_cat/nodes")][0]["headers"] == {
             # text/plain isn't modified.
-            "Accept": "text/plain,application/vnd.elasticsearch+json; compatible-with=8",
+            "Accept": "text/plain,application/vnd.elasticsearch+json; compatible-with=9",
         }
 
         # Bulk uses x-ndjson
         self.client.bulk(operations=[])
         assert len(calls) == 5
         assert calls[("PUT", "/_bulk")][0]["headers"] == {
-            "Accept": "application/vnd.elasticsearch+json; compatible-with=8",
-            "Content-Type": "application/vnd.elasticsearch+x-ndjson; compatible-with=8",
+            "Accept": "application/vnd.elasticsearch+json; compatible-with=9",
+            "Content-Type": "application/vnd.elasticsearch+x-ndjson; compatible-with=9",
         }
 
         # Mapbox vector tiles
@@ -91,8 +91,8 @@ class TestSerializers(DummyTransportTest
         )
         assert len(calls) == 6
         assert calls[("POST", "/test3/_mvt/field/z/x/y")][0]["headers"] == {
-            "Accept": "application/vnd.elasticsearch+vnd.mapbox-vector-tile; compatible-with=8",
-            "Content-Type": "application/vnd.elasticsearch+json; compatible-with=8",
+            "Accept": "application/vnd.elasticsearch+vnd.mapbox-vector-tile; compatible-with=9",
+            "Content-Type": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
     @pytest.mark.parametrize("mime_subtype", ["json", "x-ndjson"])
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/_async/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/test_document.py 9.2.0-1/test_elasticsearch/test_dsl/_async/test_document.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/test_document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/test_document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1021 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+# This file creates several documents using bad or no types because
+# these are still supported and should be kept functional in spite
+# of not having appropriate type hints. For that reason the comment
+# below disables many mypy checks that fail as a result of this.
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined"
+
+import codecs
+import ipaddress
+import pickle
+import sys
+from datetime import datetime
+from hashlib import md5
+from typing import Annotated, Any, ClassVar, Dict, List, Optional
+
+import pydantic
+import pytest
+from pytest import raises
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    Index,
+    InnerDoc,
+    Keyword,
+    M,
+    Mapping,
+    MetaField,
+    Range,
+    analyzer,
+    field,
+    mapped_field,
+    utils,
+)
+from elasticsearch.dsl.document_base import InstrumentedField
+from elasticsearch.dsl.exceptions import IllegalOperation, ValidationException
+from elasticsearch.dsl.pydantic import AsyncBaseESModel
+
+
+class MyInner(InnerDoc):
+    old_field = field.Text()
+
+
+class MyDoc(AsyncDocument):
+    title = field.Keyword()
+    name = field.Text()
+    created_at = field.Date()
+    inner = field.Object(MyInner)
+
+
+class MySubDoc(MyDoc):
+    name = field.Keyword()
+
+    class Index:
+        name = "default-index"
+
+
+class MyDoc2(AsyncDocument):
+    extra = field.Long()
+
+
+class MyMultiSubDoc(MyDoc2, MySubDoc):
+    pass
+
+
+class Comment(InnerDoc):
+    title = field.Text()
+    tags = field.Keyword(multi=True)
+
+
+class DocWithNested(AsyncDocument):
+    comments = field.Nested(Comment)
+
+    class Index:
+        name = "test-doc-with-nested"
+
+
+class SimpleCommit(AsyncDocument):
+    files = field.Text(multi=True)
+
+    class Index:
+        name = "test-git"
+
+
+class Secret(str):
+    pass
+
+
+class SecretField(field.CustomField):
+    builtin_type = "text"
+
+    def _serialize(self, data: Any) -> Any:
+        return codecs.encode(data, "rot_13")
+
+    def _deserialize(self, data: Any) -> Any:
+        if isinstance(data, Secret):
+            return data
+        return Secret(codecs.decode(data, "rot_13"))
+
+
+class SecretDoc(AsyncDocument):
+    title = SecretField(index="no")
+
+    class Index:
+        name = "test-secret-doc"
+
+
+class NestedSecret(AsyncDocument):
+    secrets = field.Nested(SecretDoc)
+
+    class Index:
+        name = "test-nested-secret"
+
+
+class OptionalObjectWithRequiredField(AsyncDocument):
+    comments = field.Nested(properties={"title": field.Keyword(required=True)})
+
+    class Index:
+        name = "test-required"
+
+
+class Host(AsyncDocument):
+    ip = field.Ip()
+
+    class Index:
+        name = "test-host"
+
+
+def test_range_serializes_properly() -> None:
+    class D(AsyncDocument):
+        lr: Range[int] = field.LongRange()
+
+    d = D(lr=Range(lt=42))
+    assert 40 in d.lr
+    assert 47 not in d.lr
+    assert {"lr": {"lt": 42}} == d.to_dict()
+
+    d = D(lr={"lt": 42})
+    assert {"lr": {"lt": 42}} == d.to_dict()
+
+
+def test_range_deserializes_properly() -> None:
+    class D(InnerDoc):
+        lr = field.LongRange()
+
+    d = D.from_es({"lr": {"lt": 42}}, True)
+    assert isinstance(d.lr, Range)
+    assert 40 in d.lr
+    assert 47 not in d.lr
+
+
+def test_resolve_nested() -> None:
+    nested, field = NestedSecret._index.resolve_nested("secrets.title")
+    assert nested == ["secrets"]
+    assert field is NestedSecret._doc_type.mapping["secrets"]["title"]
+
+
+def test_conflicting_mapping_raises_error_in_index_to_dict() -> None:
+    class A(AsyncDocument):
+        name = field.Text()
+
+    class B(AsyncDocument):
+        name = field.Keyword()
+
+    i = Index("i")
+    i.document(A)
+    i.document(B)
+
+    with raises(ValueError):
+        i.to_dict()
+
+
+def test_ip_address_serializes_properly() -> None:
+    host = Host(ip=ipaddress.IPv4Address("10.0.0.1"))
+
+    assert {"ip": "10.0.0.1"} == host.to_dict()
+
+
+def test_matches_uses_index() -> None:
+    assert SimpleCommit._matches({"_index": "test-git"})
+    assert not SimpleCommit._matches({"_index": "not-test-git"})
+
+
+def test_matches_with_no_name_always_matches() -> None:
+    class D(AsyncDocument):
+        pass
+
+    assert D._matches({})
+    assert D._matches({"_index": "whatever"})
+
+
+def test_matches_accepts_wildcards() -> None:
+    class MyDoc(AsyncDocument):
+        class Index:
+            name = "my-*"
+
+    assert MyDoc._matches({"_index": "my-index"})
+    assert not MyDoc._matches({"_index": "not-my-index"})
+
+
+def test_assigning_attrlist_to_field() -> None:
+    sc = SimpleCommit()
+    l = ["README", "README.rst"]
+    sc.files = utils.AttrList(l)
+
+    assert sc.to_dict()["files"] is l
+
+
+def test_optional_inner_objects_are_not_validated_if_missing() -> None:
+    d = OptionalObjectWithRequiredField()
+
+    d.full_clean()
+
+
+def test_custom_field() -> None:
+    s = SecretDoc(title=Secret("Hello"))
+
+    assert {"title": "Uryyb"} == s.to_dict()
+    assert s.title == "Hello"
+
+    s = SecretDoc.from_es({"_source": {"title": "Uryyb"}})
+    assert s.title == "Hello"
+    assert isinstance(s.title, Secret)
+
+
+def test_custom_field_mapping() -> None:
+    assert {
+        "properties": {"title": {"index": "no", "type": "text"}}
+    } == SecretDoc._doc_type.mapping.to_dict()
+
+
+def test_custom_field_in_nested() -> None:
+    s = NestedSecret()
+    s.secrets.append(SecretDoc(title=Secret("Hello")))
+
+    assert {"secrets": [{"title": "Uryyb"}]} == s.to_dict()
+    assert s.secrets[0].title == "Hello"
+
+
+def test_multi_works_after_doc_has_been_saved() -> None:
+    c = SimpleCommit()
+    c.full_clean()
+    c.files.append("setup.py")
+
+    assert c.to_dict() == {"files": ["setup.py"]}
+
+
+def test_multi_works_in_nested_after_doc_has_been_serialized() -> None:
+    # Issue #359
+    c = DocWithNested(comments=[Comment(title="First!")])
+
+    assert [] == c.comments[0].tags
+    assert {"comments": [{"title": "First!"}]} == c.to_dict()
+    assert [] == c.comments[0].tags
+
+
+def test_null_value_for_object() -> None:
+    d = MyDoc(inner=None)
+
+    assert d.inner is None
+
+
+def test_inherited_doc_types_can_override_index() -> None:
+    class MyDocDifferentIndex(MySubDoc):
+        class Index:
+            name = "not-default-index"
+            settings = {"number_of_replicas": 0}
+            aliases: Dict[str, Any] = {"a": {}}
+            analyzers = [analyzer("my_analizer", tokenizer="keyword")]
+
+    assert MyDocDifferentIndex._index._name == "not-default-index"
+    assert MyDocDifferentIndex()._get_index() == "not-default-index"
+    assert MyDocDifferentIndex._index.to_dict() == {
+        "aliases": {"a": {}},
+        "mappings": {
+            "properties": {
+                "created_at": {"type": "date"},
+                "inner": {
+                    "type": "object",
+                    "properties": {"old_field": {"type": "text"}},
+                },
+                "name": {"type": "keyword"},
+                "title": {"type": "keyword"},
+            }
+        },
+        "settings": {
+            "analysis": {
+                "analyzer": {"my_analizer": {"tokenizer": "keyword", "type": "custom"}}
+            },
+            "number_of_replicas": 0,
+        },
+    }
+
+
+def test_to_dict_with_meta() -> None:
+    d = MySubDoc(title="hello")
+    d.meta.routing = "some-parent"
+
+    assert {
+        "_index": "default-index",
+        "_routing": "some-parent",
+        "_source": {"title": "hello"},
+    } == d.to_dict(True)
+
+
+def test_to_dict_with_meta_includes_custom_index() -> None:
+    d = MySubDoc(title="hello")
+    d.meta.index = "other-index"
+
+    assert {"_index": "other-index", "_source": {"title": "hello"}} == d.to_dict(True)
+
+
+def test_to_dict_without_skip_empty_will_include_empty_fields() -> None:
+    d = MySubDoc(tags=[], title=None, inner={})
+
+    assert {} == d.to_dict()
+    assert {"tags": [], "title": None, "inner": {}} == d.to_dict(skip_empty=False)
+
+
+def test_attribute_can_be_removed() -> None:
+    d = MyDoc(title="hello")
+
+    del d.title
+    assert "title" not in d._d_
+
+
+def test_doc_type_can_be_correctly_pickled() -> None:
+    d = DocWithNested(
+        title="Hello World!", comments=[Comment(title="hellp")], meta={"id": 42}
+    )
+    s = pickle.dumps(d)
+
+    d2 = pickle.loads(s)
+
+    assert d2 == d
+    assert 42 == d2.meta.id
+    assert "Hello World!" == d2.title
+    assert [{"title": "hellp"}] == d2.comments
+    assert isinstance(d2.comments[0], Comment)
+
+
+def test_meta_is_accessible_even_on_empty_doc() -> None:
+    d = MyDoc()
+    d.meta
+
+    d = MyDoc(title="aaa")
+    d.meta
+
+
+def test_meta_field_mapping() -> None:
+    class User(AsyncDocument):
+        username = field.Text()
+
+        class Meta:
+            all = MetaField(enabled=False)
+            _index = MetaField(enabled=True)
+            dynamic = MetaField("strict")
+            dynamic_templates = MetaField([42])
+
+    assert {
+        "properties": {"username": {"type": "text"}},
+        "_all": {"enabled": False},
+        "_index": {"enabled": True},
+        "dynamic": "strict",
+        "dynamic_templates": [42],
+    } == User._doc_type.mapping.to_dict()
+
+
+def test_multi_value_fields() -> None:
+    class Blog(AsyncDocument):
+        tags = field.Keyword(multi=True)
+
+    b = Blog()
+    assert [] == b.tags
+    b.tags.append("search")
+    b.tags.append("python")
+    assert ["search", "python"] == b.tags
+
+
+def test_docs_with_properties() -> None:
+    class User(AsyncDocument):
+        pwd_hash: str = field.Text()
+
+        def check_password(self, pwd: bytes) -> bool:
+            return md5(pwd).hexdigest() == self.pwd_hash
+
+        @property
+        def password(self) -> None:
+            raise AttributeError("readonly")
+
+        @password.setter
+        def password(self, pwd: bytes) -> None:
+            self.pwd_hash = md5(pwd).hexdigest()
+
+    u = User(pwd_hash=md5(b"secret").hexdigest())
+    assert u.check_password(b"secret")
+    assert not u.check_password(b"not-secret")
+
+    u.password = b"not-secret"
+    assert "password" not in u._d_
+    assert not u.check_password(b"secret")
+    assert u.check_password(b"not-secret")
+
+    with raises(AttributeError):
+        u.password
+
+
+def test_nested_can_be_assigned_to() -> None:
+    d1 = DocWithNested(comments=[Comment(title="First!")])
+    d2 = DocWithNested()
+
+    d2.comments = d1.comments
+    assert isinstance(d1.comments[0], Comment)
+    assert d2.comments == [{"title": "First!"}]
+    assert {"comments": [{"title": "First!"}]} == d2.to_dict()
+    assert isinstance(d2.comments[0], Comment)
+
+
+def test_nested_can_be_none() -> None:
+    d = DocWithNested(comments=None, title="Hello World!")
+
+    assert {"title": "Hello World!"} == d.to_dict()
+
+
+def test_nested_defaults_to_list_and_can_be_updated() -> None:
+    md = DocWithNested()
+
+    assert [] == md.comments
+
+    md.comments.append({"title": "hello World!"})
+    assert {"comments": [{"title": "hello World!"}]} == md.to_dict()
+
+
+def test_to_dict_is_recursive_and_can_cope_with_multi_values() -> None:
+    md = MyDoc(name=["a", "b", "c"])
+    md.inner = [MyInner(old_field="of1"), MyInner(old_field="of2")]
+
+    assert isinstance(md.inner[0], MyInner)
+
+    assert {
+        "name": ["a", "b", "c"],
+        "inner": [{"old_field": "of1"}, {"old_field": "of2"}],
+    } == md.to_dict()
+
+
+def test_to_dict_ignores_empty_collections() -> None:
+    md = MySubDoc(name="", address={}, count=0, valid=False, tags=[])
+
+    assert {"name": "", "count": 0, "valid": False} == md.to_dict()
+
+
+def test_declarative_mapping_definition() -> None:
+    assert issubclass(MyDoc, AsyncDocument)
+    assert hasattr(MyDoc, "_doc_type")
+    assert {
+        "properties": {
+            "created_at": {"type": "date"},
+            "name": {"type": "text"},
+            "title": {"type": "keyword"},
+            "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}},
+        }
+    } == MyDoc._doc_type.mapping.to_dict()
+
+
+def test_you_can_supply_own_mapping_instance() -> None:
+    class MyD(AsyncDocument):
+        title = field.Text()
+
+        class Meta:
+            mapping = Mapping()
+            mapping.meta("_all", enabled=False)
+
+    assert {
+        "_all": {"enabled": False},
+        "properties": {"title": {"type": "text"}},
+    } == MyD._doc_type.mapping.to_dict()
+
+
+def test_document_can_be_created_dynamically() -> None:
+    n = datetime.now()
+    md = MyDoc(title="hello")
+    md.name = "My Fancy Document!"
+    md.created_at = n
+
+    inner = md.inner
+    # consistent returns
+    assert inner is md.inner
+    inner.old_field = "Already defined."
+
+    md.inner.new_field = ["undefined", "field"]
+
+    assert {
+        "title": "hello",
+        "name": "My Fancy Document!",
+        "created_at": n,
+        "inner": {"old_field": "Already defined.", "new_field": ["undefined", "field"]},
+    } == md.to_dict()
+
+
+def test_invalid_date_will_raise_exception() -> None:
+    md = MyDoc()
+    md.created_at = "not-a-date"
+    with raises(ValidationException):
+        md.full_clean()
+
+
+def test_document_inheritance() -> None:
+    assert issubclass(MySubDoc, MyDoc)
+    assert issubclass(MySubDoc, AsyncDocument)
+    assert hasattr(MySubDoc, "_doc_type")
+    assert {
+        "properties": {
+            "created_at": {"type": "date"},
+            "name": {"type": "keyword"},
+            "title": {"type": "keyword"},
+            "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}},
+        }
+    } == MySubDoc._doc_type.mapping.to_dict()
+
+
+def test_child_class_can_override_parent() -> None:
+    class A(AsyncDocument):
+        o = field.Object(dynamic=False, properties={"a": field.Text()})
+
+    class B(A):
+        o = field.Object(dynamic="strict", properties={"b": field.Text()})
+
+    assert {
+        "properties": {
+            "o": {
+                "dynamic": "strict",
+                "properties": {"a": {"type": "text"}, "b": {"type": "text"}},
+                "type": "object",
+            }
+        }
+    } == B._doc_type.mapping.to_dict()
+
+
+def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict() -> None:
+    md = MySubDoc(meta={"id": 42}, name="My First doc!")
+
+    md.meta.index = "my-index"
+    assert md.meta.index == "my-index"
+    assert md.meta.id == 42
+    assert {"name": "My First doc!"} == md.to_dict()
+    assert {"id": 42, "index": "my-index"} == md.meta.to_dict()
+
+
+def test_index_inheritance() -> None:
+    assert issubclass(MyMultiSubDoc, MySubDoc)
+    assert issubclass(MyMultiSubDoc, MyDoc2)
+    assert issubclass(MyMultiSubDoc, AsyncDocument)
+    assert hasattr(MyMultiSubDoc, "_doc_type")
+    assert hasattr(MyMultiSubDoc, "_index")
+    assert {
+        "properties": {
+            "created_at": {"type": "date"},
+            "name": {"type": "keyword"},
+            "title": {"type": "keyword"},
+            "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}},
+            "extra": {"type": "long"},
+        }
+    } == MyMultiSubDoc._doc_type.mapping.to_dict()
+
+
+def test_meta_fields_can_be_set_directly_in_init() -> None:
+    p = object()
+    md = MyDoc(_id=p, title="Hello World!")
+
+    assert md.meta.id is p
+
+
+@pytest.mark.anyio
+async def test_save_no_index(async_mock_client: Any) -> None:
+    md = MyDoc()
+    with raises(ValidationException):
+        await md.save(using="mock")
+
+
+@pytest.mark.anyio
+async def test_delete_no_index(async_mock_client: Any) -> None:
+    md = MyDoc()
+    with raises(ValidationException):
+        await md.delete(using="mock")
+
+
+@pytest.mark.anyio
+async def test_update_no_fields() -> None:
+    md = MyDoc()
+    with raises(IllegalOperation):
+        await md.update()
+
+
+def test_search_with_custom_alias_and_index() -> None:
+    search_object = MyDoc.search(
+        using="staging", index=["custom_index1", "custom_index2"]
+    )
+
+    assert search_object._using == "staging"
+    assert search_object._index == ["custom_index1", "custom_index2"]
+
+
+def test_from_es_respects_underscored_non_meta_fields() -> None:
+    doc = {
+        "_index": "test-index",
+        "_id": "elasticsearch",
+        "_score": 12.0,
+        "fields": {"hello": "world", "_routing": "es", "_tags": ["search"]},
+        "_source": {
+            "city": "Amsterdam",
+            "name": "Elasticsearch",
+            "_tagline": "You know, for search",
+        },
+    }
+
+    class Company(AsyncDocument):
+        class Index:
+            name = "test-company"
+
+    c = Company.from_es(doc)
+
+    assert c.meta.fields._tags == ["search"]
+    assert c.meta.fields._routing == "es"
+    assert c._tagline == "You know, for search"
+
+
+def test_nested_and_object_inner_doc() -> None:
+    class MySubDocWithNested(MyDoc):
+        nested_inner = field.Nested(MyInner)
+
+    props = MySubDocWithNested._doc_type.mapping.to_dict()["properties"]
+    assert props == {
+        "created_at": {"type": "date"},
+        "inner": {"properties": {"old_field": {"type": "text"}}, "type": "object"},
+        "name": {"type": "text"},
+        "nested_inner": {
+            "properties": {"old_field": {"type": "text"}},
+            "type": "nested",
+        },
+        "title": {"type": "keyword"},
+    }
+
+
+def test_doc_with_type_hints() -> None:
+    class TypedInnerDoc(InnerDoc):
+        st: M[str]
+        dt: M[Optional[datetime]]
+        li: M[List[int]]
+
+    class TypedDoc(AsyncDocument):
+        st: str
+        dt: Optional[datetime]
+        li: List[int]
+        ob: TypedInnerDoc
+        ns: List[TypedInnerDoc]
+        ip: Optional[str] = field.Ip()
+        k1: str = field.Keyword(required=True)
+        k2: M[str] = field.Keyword()
+        k3: str = mapped_field(field.Keyword(), default="foo")
+        k4: M[Optional[str]] = mapped_field(field.Keyword())  # type: ignore[misc]
+        s1: Secret = SecretField()
+        s2: M[Secret] = SecretField()
+        s3: Secret = mapped_field(SecretField())  # type: ignore[misc]
+        s4: M[Optional[Secret]] = mapped_field(
+            SecretField(), default_factory=lambda: "foo"
+        )
+        i1: ClassVar
+        i2: ClassVar[int]
+        i3: str = mapped_field(exclude=True)
+
+    class TypedDocAnnotated(AsyncDocument):
+        st: Annotated[str, "foo"]
+        dt: Annotated[Optional[datetime], "bar"]
+        li: Annotated[List[int], "baz"]
+        ob: Annotated[TypedInnerDoc, "qux"]
+        ns: Annotated[List[TypedInnerDoc], "quux"]
+        ip: Annotated[Optional[str], field.Ip()]
+        k1: Annotated[str, field.Keyword(required=True)]
+        k2: Annotated[M[str], field.Keyword()]
+        k3: Annotated[str, mapped_field(field.Keyword(), default="foo")]
+        k4: Annotated[M[Optional[str]], mapped_field(field.Keyword())]  # type: ignore[misc]
+        s1: Annotated[Secret, SecretField()]
+        s2: Annotated[M[Secret], SecretField()]
+        s3: Annotated[Secret, mapped_field(SecretField())]  # type: ignore[misc]
+        s4: Annotated[
+            M[Optional[Secret]],
+            mapped_field(SecretField(), default_factory=lambda: "foo"),
+        ]
+        if sys.version_info[0] > 3 or (
+            sys.version_info[0] == 3 and sys.version_info[1] >= 11
+        ):
+            i1: Annotated[ClassVar, "classvar"]
+            i2: Annotated[ClassVar[int], "classvar"]
+        else:
+            i1: ClassVar
+            i2: ClassVar[int]
+        i3: Annotated[str, mapped_field(exclude=True)]
+
+    for doc_class in [TypedDoc, TypedDocAnnotated]:
+        props = doc_class._doc_type.mapping.to_dict()["properties"]
+        assert props == {
+            "st": {"type": "text"},
+            "dt": {"type": "date"},
+            "li": {"type": "integer"},
+            "ob": {
+                "type": "object",
+                "properties": {
+                    "st": {"type": "text"},
+                    "dt": {"type": "date"},
+                    "li": {"type": "integer"},
+                },
+            },
+            "ns": {
+                "type": "nested",
+                "properties": {
+                    "st": {"type": "text"},
+                    "dt": {"type": "date"},
+                    "li": {"type": "integer"},
+                },
+            },
+            "ip": {"type": "ip"},
+            "k1": {"type": "keyword"},
+            "k2": {"type": "keyword"},
+            "k3": {"type": "keyword"},
+            "k4": {"type": "keyword"},
+            "s1": {"type": "text"},
+            "s2": {"type": "text"},
+            "s3": {"type": "text"},
+            "s4": {"type": "text"},
+        }
+
+        doc_class.i1 = "foo"
+        doc_class.i2 = 123
+        doc_class.i3 = "bar"
+
+        doc = doc_class()
+        assert doc.k3 == "foo"
+        assert doc.s4 == "foo"
+        with raises(ValidationException) as exc_info:
+            doc.full_clean()
+        assert set(exc_info.value.args[0].keys()) == {
+            "st",
+            "k1",
+            "k2",
+            "ob",
+            "s1",
+            "s2",
+            "s3",
+        }
+
+        assert doc_class.i1 == "foo"
+        assert doc_class.i2 == 123
+        assert doc_class.i3 == "bar"
+
+        doc.st = "s"
+        doc.li = [1, 2, 3]
+        doc.k1 = "k1"
+        doc.k2 = "k2"
+        doc.ob.st = "s"
+        doc.ob.li = [1]
+        doc.s1 = "s1"
+        doc.s2 = "s2"
+        doc.s3 = "s3"
+        doc.full_clean()
+
+        doc.ob = TypedInnerDoc(li=[1])
+        with raises(ValidationException) as exc_info:
+            doc.full_clean()
+        assert set(exc_info.value.args[0].keys()) == {"ob"}
+        assert set(exc_info.value.args[0]["ob"][0].args[0].keys()) == {"st"}
+
+        doc.ob.st = "s"
+        doc.ns.append(TypedInnerDoc(li=[1, 2]))
+        with raises(ValidationException) as exc_info:
+            doc.full_clean()
+
+        doc.ns[0].st = "s"
+        doc.full_clean()
+
+        doc.ip = "1.2.3.4"
+        n = datetime.now()
+        doc.dt = n
+        assert doc.to_dict() == {
+            "st": "s",
+            "li": [1, 2, 3],
+            "dt": n,
+            "ob": {
+                "st": "s",
+                "li": [1],
+            },
+            "ns": [
+                {
+                    "st": "s",
+                    "li": [1, 2],
+                }
+            ],
+            "ip": "1.2.3.4",
+            "k1": "k1",
+            "k2": "k2",
+            "k3": "foo",
+            "s1": "s1",
+            "s2": "s2",
+            "s3": "s3",
+            "s4": "foo",
+        }
+
+        s = doc_class.search().sort(doc_class.st, -doc_class.dt, +doc_class.ob.st)
+        s.aggs.bucket("terms_agg", "terms", field=doc_class.k1)
+        d = s.to_dict()
+        assert d["aggs"] == {"terms_agg": {"terms": {"field": "k1"}}}
+        assert d["sort"] == ["st", {"dt": {"order": "desc"}}, "ob.st"]
+
+
+@pytest.mark.skipif(sys.version_info < (3, 10), reason="requires Python 3.10")
+def test_doc_with_pipe_type_hints() -> None:
+    with pytest.raises(TypeError):
+
+        class BadlyTypedDoc(AsyncDocument):
+            s: str
+            f: str | int | None  # type: ignore[syntax]
+
+    class TypedDoc(AsyncDocument):
+        s: str
+        f1: str | None  # type: ignore[syntax]
+        f2: M[int | None]  # type: ignore[syntax]
+        f3: M[datetime | None]  # type: ignore[syntax]
+
+    props = TypedDoc._doc_type.mapping.to_dict()["properties"]
+    assert props == {
+        "s": {"type": "text"},
+        "f1": {"type": "text"},
+        "f2": {"type": "integer"},
+        "f3": {"type": "date"},
+    }
+
+    doc = TypedDoc()
+    with raises(ValidationException) as exc_info:
+        doc.full_clean()
+    assert set(exc_info.value.args[0].keys()) == {"s"}
+    doc.s = "s"
+    doc.full_clean()
+
+
+def test_instrumented_field() -> None:
+    class Child(InnerDoc):
+        st: M[str]
+
+    class Doc(AsyncDocument):
+        st: str
+        ob: Child
+        ns: List[Child]
+
+    doc = Doc(
+        st="foo",
+        ob=Child(st="bar"),
+        ns=[
+            Child(st="baz"),
+            Child(st="qux"),
+        ],
+    )
+
+    assert type(doc.st) is str
+    assert doc.st == "foo"
+
+    assert type(doc.ob) is Child
+    assert doc.ob.st == "bar"
+
+    assert type(doc.ns) is utils.AttrList
+    assert doc.ns[0].st == "baz"
+    assert doc.ns[1].st == "qux"
+    assert type(doc.ns[0]) is Child
+    assert type(doc.ns[1]) is Child
+
+    assert type(Doc.st) is InstrumentedField
+    assert str(Doc.st) == "st"
+    assert +Doc.st == "st"
+    assert -Doc.st == "-st"
+    assert Doc.st.to_dict() == {"type": "text"}
+    with raises(AttributeError):
+        Doc.st.something
+
+    assert type(Doc.ob) is InstrumentedField
+    assert str(Doc.ob) == "ob"
+    assert str(Doc.ob.st) == "ob.st"
+    assert +Doc.ob.st == "ob.st"
+    assert -Doc.ob.st == "-ob.st"
+    assert Doc.ob.st.to_dict() == {"type": "text"}
+    with raises(AttributeError):
+        Doc.ob.something
+    with raises(AttributeError):
+        Doc.ob.st.something
+
+    assert type(Doc.ns) is InstrumentedField
+    assert str(Doc.ns) == "ns"
+    assert str(Doc.ns.st) == "ns.st"
+    assert +Doc.ns.st == "ns.st"
+    assert -Doc.ns.st == "-ns.st"
+    assert Doc.ns.st.to_dict() == {"type": "text"}
+    with raises(AttributeError):
+        Doc.ns.something
+    with raises(AttributeError):
+        Doc.ns.st.something
+
+
+def test_pydantic_integration() -> None:
+    class Location(pydantic.BaseModel):
+        city: str
+        country: str
+
+    class Address(pydantic.BaseModel):
+        street: str
+        location: Location
+
+    class Phone(pydantic.BaseModel):
+        type: str
+        number: str
+
+    class User(AsyncBaseESModel):
+        name: Annotated[str, Keyword()] = pydantic.Field(default="Unknown")
+        bio: str
+        age: int = pydantic.Field(strict=True, ge=0)
+        address: Address = pydantic.Field(
+            default=Address(
+                street="Unknown", location=Location(city="Unknown", country="Unknown")
+            )
+        )
+        phones: list[Phone] = pydantic.Field(default=[])
+
+    u = User(
+        name="foo",
+        bio="lorem ipsum",
+        age=30,
+        address=Address(
+            street="123 Main St.",
+            location=Location(city="San Francisco", country="USA"),
+        ),
+    )
+    u.phones.append(Phone(type="Home", number="+12345678900"))
+    assert u.model_dump() == {
+        "name": "foo",
+        "bio": "lorem ipsum",
+        "age": 30,
+        "address": {
+            "street": "123 Main St.",
+            "location": {
+                "city": "San Francisco",
+                "country": "USA",
+            },
+        },
+        "phones": [
+            {
+                "type": "Home",
+                "number": "+12345678900",
+            }
+        ],
+        "meta": {
+            "id": "",
+            "index": "",
+            "primary_term": 0,
+            "score": 0,
+            "seq_no": 0,
+            "version": 0,
+        },
+    }
+    udoc = u.to_doc()
+    assert udoc.name == "foo"
+    assert udoc.bio == "lorem ipsum"
+    assert udoc.age == 30
+    assert udoc.to_dict() == {
+        "name": "foo",
+        "bio": "lorem ipsum",
+        "age": 30,
+        "address": {
+            "street": "123 Main St.",
+            "location": {
+                "city": "San Francisco",
+                "country": "USA",
+            },
+        },
+        "phones": [
+            {
+                "type": "Home",
+                "number": "+12345678900",
+            }
+        ],
+    }
+
+    u2 = User.from_doc(udoc)
+    assert u == u2
+
+    with raises(pydantic.ValidationError):
+        _ = User()
+
+    with raises(pydantic.ValidationError):
+        _ = User(name="foo", bio="lorem ipsum")
+
+    with raises(pydantic.ValidationError):
+        _ = User(name="foo", bio="lorem ipsum", age=-10)
+
+    u3 = User(bio="lorem ipsum", age=30)
+    assert u3.name == "Unknown"
+    assert u3.address.location.country == "Unknown"
+    assert u3.phones == []
+    assert u3.to_doc().name == "Unknown"
+    assert u3.to_doc().address.location.country == "Unknown"
+    assert u3.to_doc().phones == []
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/test_faceted_search.py 9.2.0-1/test_elasticsearch/test_dsl/_async/test_faceted_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/test_faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/test_faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,201 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+
+import pytest
+
+from elasticsearch.dsl.faceted_search import (
+    AsyncFacetedSearch,
+    DateHistogramFacet,
+    TermsFacet,
+)
+
+
+class BlogSearch(AsyncFacetedSearch):
+    doc_types = ["user", "post"]
+    fields = [
+        "title^5",
+        "body",
+    ]
+
+    facets = {
+        "category": TermsFacet(field="category.raw"),
+        "tags": TermsFacet(field="tags"),
+    }
+
+
+def test_query_is_created_properly() -> None:
+    bs = BlogSearch("python search")
+    s = bs.build_search()
+
+    assert s._doc_type == ["user", "post"]
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"match_all": {}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"match_all": {}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "highlight": {"fields": {"body": {}, "title": {}}},
+    } == s.to_dict()
+
+
+def test_query_is_created_properly_with_sort_tuple() -> None:
+    bs = BlogSearch("python search", sort=("category", "-title"))
+    s = bs.build_search()
+
+    assert s._doc_type == ["user", "post"]
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"match_all": {}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"match_all": {}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "highlight": {"fields": {"body": {}, "title": {}}},
+        "sort": ["category", {"title": {"order": "desc"}}],
+    } == s.to_dict()
+
+
+def test_filter_is_applied_to_search_but_not_relevant_facet() -> None:
+    bs = BlogSearch("python search", filters={"category": "elastic"})
+    s = bs.build_search()
+
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"terms": {"category.raw": ["elastic"]}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"match_all": {}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "post_filter": {"terms": {"category.raw": ["elastic"]}},
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "highlight": {"fields": {"body": {}, "title": {}}},
+    } == s.to_dict()
+
+
+def test_filters_are_applied_to_search_and_relevant_facets() -> None:
+    bs = BlogSearch(
+        "python search", filters={"category": "elastic", "tags": ["python", "django"]}
+    )
+    s = bs.build_search()
+
+    d = s.to_dict()
+
+    # we need to test post_filter without relying on order
+    f = d["post_filter"]["bool"].pop("must")
+    assert len(f) == 2
+    assert {"terms": {"category.raw": ["elastic"]}} in f
+    assert {"terms": {"tags": ["python", "django"]}} in f
+
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"terms": {"category.raw": ["elastic"]}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"terms": {"tags": ["python", "django"]}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "post_filter": {"bool": {}},
+        "highlight": {"fields": {"body": {}, "title": {}}},
+    } == d
+
+
+def test_date_histogram_facet_with_1970_01_01_date() -> None:
+    dhf = DateHistogramFacet()
+    assert dhf.get_value({"key": None}) == datetime(1970, 1, 1, 0, 0)  # type: ignore[arg-type]
+    assert dhf.get_value({"key": 0}) == datetime(1970, 1, 1, 0, 0)  # type: ignore[arg-type]
+
+
+@pytest.mark.parametrize(
+    ["interval_type", "interval"],
+    [
+        ("interval", "year"),
+        ("calendar_interval", "year"),
+        ("interval", "month"),
+        ("calendar_interval", "month"),
+        ("interval", "week"),
+        ("calendar_interval", "week"),
+        ("interval", "day"),
+        ("calendar_interval", "day"),
+        ("fixed_interval", "day"),
+        ("interval", "hour"),
+        ("fixed_interval", "hour"),
+        ("interval", "1Y"),
+        ("calendar_interval", "1Y"),
+        ("interval", "1M"),
+        ("calendar_interval", "1M"),
+        ("interval", "1w"),
+        ("calendar_interval", "1w"),
+        ("interval", "1d"),
+        ("calendar_interval", "1d"),
+        ("fixed_interval", "1d"),
+        ("interval", "1h"),
+        ("fixed_interval", "1h"),
+    ],
+)
+def test_date_histogram_interval_types(interval_type: str, interval: str) -> None:
+    dhf = DateHistogramFacet(field="@timestamp", **{interval_type: interval})
+    assert dhf.get_aggregation().to_dict() == {
+        "date_histogram": {
+            "field": "@timestamp",
+            interval_type: interval,
+            "min_doc_count": 0,
+        }
+    }
+    dhf.get_value_filter(datetime.now())
+
+
+def test_date_histogram_no_interval_keyerror() -> None:
+    dhf = DateHistogramFacet(field="@timestamp")
+    with pytest.raises(KeyError) as e:
+        dhf.get_value_filter(datetime.now())
+    assert str(e.value) == "'interval'"
+
+
+def test_params_added_to_search() -> None:
+    bs = BlogSearch("python search")
+    assert bs._s._params == {}
+    bs.params(routing="42")
+    assert bs._s._params == {"routing": "42"}
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/test_index.py 9.2.0-1/test_elasticsearch/test_dsl/_async/test_index.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/test_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/test_index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,197 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import string
+from random import choice
+from typing import Any, Dict
+
+import pytest
+from pytest import raises
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncIndex,
+    AsyncIndexTemplate,
+    Date,
+    Text,
+    analyzer,
+)
+
+
+class Post(AsyncDocument):
+    title = Text()
+    published_from = Date()
+
+
+def test_multiple_doc_types_will_combine_mappings() -> None:
+    class User(AsyncDocument):
+        username = Text()
+
+    i = AsyncIndex("i")
+    i.document(Post)
+    i.document(User)
+    assert {
+        "mappings": {
+            "properties": {
+                "title": {"type": "text"},
+                "username": {"type": "text"},
+                "published_from": {"type": "date"},
+            }
+        }
+    } == i.to_dict()
+
+
+def test_search_is_limited_to_index_name() -> None:
+    i = AsyncIndex("my-index")
+    s = i.search()
+
+    assert s._index == ["my-index"]
+
+
+def test_cloned_index_has_copied_settings_and_using() -> None:
+    client = object()
+    i = AsyncIndex("my-index", using=client)  # type: ignore[arg-type]
+    i.settings(number_of_shards=1)
+
+    i2 = i.clone("my-other-index")
+
+    assert "my-other-index" == i2._name
+    assert client is i2._using
+    assert i._settings == i2._settings
+    assert i._settings is not i2._settings
+
+
+def test_cloned_index_has_analysis_attribute() -> None:
+    """
+    Regression test for Issue #582 in which `AsyncIndex.clone()` was not copying
+    over the `_analysis` attribute.
+    """
+    client = object()
+    i = AsyncIndex("my-index", using=client)  # type: ignore[arg-type]
+
+    random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100))
+    random_analyzer = analyzer(
+        random_analyzer_name, tokenizer="standard", filter="standard"
+    )
+
+    i.analyzer(random_analyzer)
+
+    i2 = i.clone("my-clone-index")
+
+    assert i.to_dict()["settings"]["analysis"] == i2.to_dict()["settings"]["analysis"]
+
+
+def test_settings_are_saved() -> None:
+    i = AsyncIndex("i")
+    i.settings(number_of_replicas=0)
+    i.settings(number_of_shards=1)
+
+    assert {"settings": {"number_of_shards": 1, "number_of_replicas": 0}} == i.to_dict()
+
+
+def test_registered_doc_type_included_in_to_dict() -> None:
+    i = AsyncIndex("i", using="alias")
+    i.document(Post)
+
+    assert {
+        "mappings": {
+            "properties": {
+                "title": {"type": "text"},
+                "published_from": {"type": "date"},
+            }
+        }
+    } == i.to_dict()
+
+
+def test_registered_doc_type_included_in_search() -> None:
+    i = AsyncIndex("i", using="alias")
+    i.document(Post)
+
+    s = i.search()
+
+    assert s._doc_type == [Post]
+
+
+def test_aliases_add_to_object() -> None:
+    random_alias = "".join(choice(string.ascii_letters) for _ in range(100))
+    alias_dict: Dict[str, Any] = {random_alias: {}}
+
+    index = AsyncIndex("i", using="alias")
+    index.aliases(**alias_dict)
+
+    assert index._aliases == alias_dict
+
+
+def test_aliases_returned_from_to_dict() -> None:
+    random_alias = "".join(choice(string.ascii_letters) for _ in range(100))
+    alias_dict: Dict[str, Any] = {random_alias: {}}
+
+    index = AsyncIndex("i", using="alias")
+    index.aliases(**alias_dict)
+
+    assert index._aliases == index.to_dict()["aliases"] == alias_dict
+
+
+def test_analyzers_added_to_object() -> None:
+    random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100))
+    random_analyzer = analyzer(
+        random_analyzer_name, tokenizer="standard", filter="standard"
+    )
+
+    index = AsyncIndex("i", using="alias")
+    index.analyzer(random_analyzer)
+
+    assert index._analysis["analyzer"][random_analyzer_name] == {
+        "filter": ["standard"],
+        "type": "custom",
+        "tokenizer": "standard",
+    }
+
+
+def test_analyzers_returned_from_to_dict() -> None:
+    random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100))
+    random_analyzer = analyzer(
+        random_analyzer_name, tokenizer="standard", filter="standard"
+    )
+    index = AsyncIndex("i", using="alias")
+    index.analyzer(random_analyzer)
+
+    assert index.to_dict()["settings"]["analysis"]["analyzer"][
+        random_analyzer_name
+    ] == {"filter": ["standard"], "type": "custom", "tokenizer": "standard"}
+
+
+def test_conflicting_analyzer_raises_error() -> None:
+    i = AsyncIndex("i")
+    i.analyzer("my_analyzer", tokenizer="whitespace", filter=["lowercase", "stop"])
+
+    with raises(ValueError):
+        i.analyzer("my_analyzer", tokenizer="keyword", filter=["lowercase", "stop"])
+
+
+def test_index_template_can_have_order() -> None:
+    i = AsyncIndex("i-*")
+    it = i.as_template("i", order=2)
+
+    assert {"index_patterns": ["i-*"], "order": 2} == it.to_dict()
+
+
+@pytest.mark.anyio
+async def test_index_template_save_result(async_mock_client: Any) -> None:
+    it = AsyncIndexTemplate("test-template", "test-*")
+
+    assert await it.save(using="mock") == await async_mock_client.indices.put_template()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/test_mapping.py 9.2.0-1/test_elasticsearch/test_dsl/_async/test_mapping.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/test_mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/test_mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,222 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import json
+
+from elasticsearch.dsl import AsyncMapping, Keyword, Nested, Text, analysis
+
+
+def test_mapping_can_has_fields() -> None:
+    m = AsyncMapping()
+    m.field("name", "text").field("tags", "keyword")
+
+    assert {
+        "properties": {"name": {"type": "text"}, "tags": {"type": "keyword"}}
+    } == m.to_dict()
+
+
+def test_mapping_update_is_recursive() -> None:
+    m1 = AsyncMapping()
+    m1.field("title", "text")
+    m1.field("author", "object")
+    m1.field("author", "object", properties={"name": {"type": "text"}})
+    m1.meta("_all", enabled=False)
+    m1.meta("dynamic", False)
+
+    m2 = AsyncMapping()
+    m2.field("published_from", "date")
+    m2.field("author", "object", properties={"email": {"type": "text"}})
+    m2.field("title", "text")
+    m2.field("lang", "keyword")
+    m2.meta("_analyzer", path="lang")
+
+    m1.update(m2, update_only=True)
+
+    assert {
+        "_all": {"enabled": False},
+        "_analyzer": {"path": "lang"},
+        "dynamic": False,
+        "properties": {
+            "published_from": {"type": "date"},
+            "title": {"type": "text"},
+            "lang": {"type": "keyword"},
+            "author": {
+                "type": "object",
+                "properties": {"name": {"type": "text"}, "email": {"type": "text"}},
+            },
+        },
+    } == m1.to_dict()
+
+
+def test_properties_can_iterate_over_all_the_fields() -> None:
+    m = AsyncMapping()
+    m.field("f1", "text", test_attr="f1", fields={"f2": Keyword(test_attr="f2")})
+    m.field("f3", Nested(test_attr="f3", properties={"f4": Text(test_attr="f4")}))
+
+    assert {"f1", "f2", "f3", "f4"} == {
+        f.test_attr for f in m.properties._collect_fields()
+    }
+
+
+def test_mapping_can_collect_all_analyzers_and_normalizers() -> None:
+    a1 = analysis.analyzer(
+        "my_analyzer1",
+        tokenizer="keyword",
+        filter=[
+            "lowercase",
+            analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]),
+        ],
+    )
+    a2 = analysis.analyzer("english")
+    a3 = analysis.analyzer("unknown_custom")
+    a4 = analysis.analyzer(
+        "my_analyzer2",
+        tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3),
+        filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])],
+    )
+    a5 = analysis.analyzer("my_analyzer3", tokenizer="keyword")
+    n1 = analysis.normalizer("my_normalizer1", filter=["lowercase"])
+    n2 = analysis.normalizer(
+        "my_normalizer2",
+        filter=[
+            "my_filter1",
+            "my_filter2",
+            analysis.token_filter("my_filter3", "stop", stopwords=["e", "f"]),
+        ],
+    )
+    n3 = analysis.normalizer("unknown_custom")
+
+    m = AsyncMapping()
+    m.field(
+        "title",
+        "text",
+        analyzer=a1,
+        fields={"english": Text(analyzer=a2), "unknown": Keyword(search_analyzer=a3)},
+    )
+    m.field("comments", Nested(properties={"author": Text(analyzer=a4)}))
+    m.field("normalized_title", "keyword", normalizer=n1)
+    m.field("normalized_comment", "keyword", normalizer=n2)
+    m.field("unknown", "keyword", normalizer=n3)
+    m.meta("_all", analyzer=a5)
+
+    assert {
+        "analyzer": {
+            "my_analyzer1": {
+                "filter": ["lowercase", "my_filter1"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            },
+            "my_analyzer2": {
+                "filter": ["my_filter2"],
+                "tokenizer": "trigram",
+                "type": "custom",
+            },
+            "my_analyzer3": {"tokenizer": "keyword", "type": "custom"},
+        },
+        "normalizer": {
+            "my_normalizer1": {"filter": ["lowercase"], "type": "custom"},
+            "my_normalizer2": {
+                "filter": ["my_filter1", "my_filter2", "my_filter3"],
+                "type": "custom",
+            },
+        },
+        "filter": {
+            "my_filter1": {"stopwords": ["a", "b"], "type": "stop"},
+            "my_filter2": {"stopwords": ["c", "d"], "type": "stop"},
+            "my_filter3": {"stopwords": ["e", "f"], "type": "stop"},
+        },
+        "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}},
+    } == m._collect_analysis()
+
+    assert json.loads(json.dumps(m.to_dict())) == m.to_dict()
+
+
+def test_mapping_can_collect_multiple_analyzers() -> None:
+    a1 = analysis.analyzer(
+        "my_analyzer1",
+        tokenizer="keyword",
+        filter=[
+            "lowercase",
+            analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]),
+        ],
+    )
+    a2 = analysis.analyzer(
+        "my_analyzer2",
+        tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3),
+        filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])],
+    )
+    m = AsyncMapping()
+    m.field("title", "text", analyzer=a1, search_analyzer=a2)
+    m.field(
+        "text",
+        "text",
+        analyzer=a1,
+        fields={
+            "english": Text(analyzer=a1),
+            "unknown": Keyword(analyzer=a1, search_analyzer=a2),
+        },
+    )
+    assert {
+        "analyzer": {
+            "my_analyzer1": {
+                "filter": ["lowercase", "my_filter1"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            },
+            "my_analyzer2": {
+                "filter": ["my_filter2"],
+                "tokenizer": "trigram",
+                "type": "custom",
+            },
+        },
+        "filter": {
+            "my_filter1": {"stopwords": ["a", "b"], "type": "stop"},
+            "my_filter2": {"stopwords": ["c", "d"], "type": "stop"},
+        },
+        "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}},
+    } == m._collect_analysis()
+
+
+def test_even_non_custom_analyzers_can_have_params() -> None:
+    a1 = analysis.analyzer("whitespace", type="pattern", pattern=r"\\s+")
+    m = AsyncMapping()
+    m.field("title", "text", analyzer=a1)
+
+    assert {
+        "analyzer": {"whitespace": {"type": "pattern", "pattern": r"\\s+"}}
+    } == m._collect_analysis()
+
+
+def test_resolve_field_can_resolve_multifields() -> None:
+    m = AsyncMapping()
+    m.field("title", "text", fields={"keyword": Keyword()})
+
+    assert isinstance(m.resolve_field("title.keyword"), Keyword)
+
+
+def test_resolve_nested() -> None:
+    m = AsyncMapping()
+    m.field("n1", "nested", properties={"n2": Nested(properties={"k1": Keyword()})})
+    m.field("k2", "keyword")
+
+    nested, field = m.resolve_nested("n1.n2.k1")
+    assert nested == ["n1", "n1.n2"]
+    assert isinstance(field, Keyword)
+
+    nested, field = m.resolve_nested("k2")
+    assert nested == []
+    assert isinstance(field, Keyword)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/test_search.py 9.2.0-1/test_elasticsearch/test_dsl/_async/test_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/test_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/test_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,841 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from copy import deepcopy
+from typing import Any
+
+import pytest
+from pytest import raises
+
+from elasticsearch.dsl import (
+    AsyncEmptySearch,
+    AsyncSearch,
+    Document,
+    Q,
+    query,
+    types,
+    wrappers,
+)
+from elasticsearch.dsl.exceptions import IllegalOperation
+
+
+def test_expand__to_dot_is_respected() -> None:
+    s = AsyncSearch().query("match", a__b=42, _expand__to_dot=False)
+
+    assert {"query": {"match": {"a__b": 42}}} == s.to_dict()
+
+
+@pytest.mark.anyio
+async def test_execute_uses_cache() -> None:
+    s = AsyncSearch()
+    r = object()
+    s._response = r  # type: ignore[assignment]
+
+    assert r is await s.execute()
+
+
+@pytest.mark.anyio
+async def test_cache_can_be_ignored(async_mock_client: Any) -> None:
+    s = AsyncSearch(using="mock")
+    r = object()
+    s._response = r  # type: ignore[assignment]
+    await s.execute(ignore_cache=True)
+
+    async_mock_client.search.assert_awaited_once_with(index=None, body={})
+
+
+@pytest.mark.anyio
+async def test_iter_iterates_over_hits() -> None:
+    s = AsyncSearch()
+    s._response = [1, 2, 3]  # type: ignore[assignment]
+
+    assert [1, 2, 3] == [hit async for hit in s]
+
+
+def test_cache_isnt_cloned() -> None:
+    s = AsyncSearch()
+    s._response = object()  # type: ignore[assignment]
+
+    assert not hasattr(s._clone(), "_response")
+
+
+def test_search_starts_with_no_query() -> None:
+    s = AsyncSearch()
+
+    assert s.query._proxied is None
+
+
+def test_search_query_combines_query() -> None:
+    s = AsyncSearch()
+
+    s2 = s.query("match", f=42)
+    assert s2.query._proxied == query.Match(f=42)
+    assert s.query._proxied is None
+
+    s3 = s2.query("match", f=43)
+    assert s2.query._proxied == query.Match(f=42)
+    assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)])
+
+
+def test_query_can_be_assigned_to() -> None:
+    s = AsyncSearch()
+
+    q = Q("match", title="python")
+    s.query = q  # type: ignore
+
+    assert s.query._proxied is q
+
+
+def test_query_can_be_wrapped() -> None:
+    s = AsyncSearch().query("match", title="python")
+
+    s.query = Q("function_score", query=s.query, field_value_factor={"field": "rating"})  # type: ignore
+
+    assert {
+        "query": {
+            "function_score": {
+                "functions": [{"field_value_factor": {"field": "rating"}}],
+                "query": {"match": {"title": "python"}},
+            }
+        }
+    } == s.to_dict()
+
+
+def test_using() -> None:
+    o = object()
+    o2 = object()
+    s = AsyncSearch(using=o)
+    assert s._using is o
+    s2 = s.using(o2)  # type: ignore[arg-type]
+    assert s._using is o
+    assert s2._using is o2
+
+
+def test_methods_are_proxied_to_the_query() -> None:
+    s = AsyncSearch().query("match_all")
+
+    assert s.query.to_dict() == {"match_all": {}}
+
+
+def test_query_always_returns_search() -> None:
+    s = AsyncSearch()
+
+    assert isinstance(s.query("match", f=42), AsyncSearch)
+
+
+def test_source_copied_on_clone() -> None:
+    s = AsyncSearch().source(False)
+    assert s._clone()._source == s._source
+    assert s._clone()._source is False
+
+    s2 = AsyncSearch().source([])
+    assert s2._clone()._source == s2._source
+    assert s2._source == []
+
+    s3 = AsyncSearch().source(["some", "fields"])
+    assert s3._clone()._source == s3._source
+    assert s3._clone()._source == ["some", "fields"]
+
+
+def test_copy_clones() -> None:
+    from copy import copy
+
+    s1 = AsyncSearch().source(["some", "fields"])
+    s2 = copy(s1)
+
+    assert s1 == s2
+    assert s1 is not s2
+
+
+def test_aggs_allow_two_metric() -> None:
+    s = AsyncSearch()
+
+    s.aggs.metric("a", "max", field="a").metric("b", "max", field="b")
+
+    assert s.to_dict() == {
+        "aggs": {"a": {"max": {"field": "a"}}, "b": {"max": {"field": "b"}}}
+    }
+
+
+def test_aggs_get_copied_on_change() -> None:
+    s = AsyncSearch().query("match_all")
+    s.aggs.bucket("per_tag", "terms", field="f").metric(
+        "max_score", "max", field="score"
+    )
+
+    s2 = s.query("match_all")
+    s2.aggs.bucket("per_month", "date_histogram", field="date", interval="month")
+    s3 = s2.query("match_all")
+    s3.aggs["per_month"].metric("max_score", "max", field="score")
+    s4 = s3._clone()
+    s4.aggs.metric("max_score", "max", field="score")
+
+    d: Any = {
+        "query": {"match_all": {}},
+        "aggs": {
+            "per_tag": {
+                "terms": {"field": "f"},
+                "aggs": {"max_score": {"max": {"field": "score"}}},
+            }
+        },
+    }
+
+    assert d == s.to_dict()
+    d["aggs"]["per_month"] = {"date_histogram": {"field": "date", "interval": "month"}}
+    assert d == s2.to_dict()
+    d["aggs"]["per_month"]["aggs"] = {"max_score": {"max": {"field": "score"}}}
+    assert d == s3.to_dict()
+    d["aggs"]["max_score"] = {"max": {"field": "score"}}
+    assert d == s4.to_dict()
+
+
+def test_search_index() -> None:
+    s = AsyncSearch(index="i")
+    assert s._index == ["i"]
+    s = s.index("i2")
+    assert s._index == ["i", "i2"]
+    s = s.index("i3")
+    assert s._index == ["i", "i2", "i3"]
+    s = s.index()
+    assert s._index is None
+    s = AsyncSearch(index=("i", "i2"))
+    assert s._index == ["i", "i2"]
+    s = AsyncSearch(index=["i", "i2"])
+    assert s._index == ["i", "i2"]
+    s = AsyncSearch()
+    s = s.index("i", "i2")
+    assert s._index == ["i", "i2"]
+    s2 = s.index("i3")
+    assert s._index == ["i", "i2"]
+    assert s2._index == ["i", "i2", "i3"]
+    s = AsyncSearch()
+    s = s.index(["i", "i2"], "i3")
+    assert s._index == ["i", "i2", "i3"]
+    s2 = s.index("i4")
+    assert s._index == ["i", "i2", "i3"]
+    assert s2._index == ["i", "i2", "i3", "i4"]
+    s2 = s.index(["i4"])
+    assert s2._index == ["i", "i2", "i3", "i4"]
+    s2 = s.index(("i4", "i5"))
+    assert s2._index == ["i", "i2", "i3", "i4", "i5"]
+
+
+def test_doc_type_document_class() -> None:
+    class MyDocument(Document):
+        pass
+
+    s = AsyncSearch(doc_type=MyDocument)
+    assert s._doc_type == [MyDocument]
+    assert s._doc_type_map == {}
+
+    s = AsyncSearch().doc_type(MyDocument)
+    assert s._doc_type == [MyDocument]
+    assert s._doc_type_map == {}
+
+
+def test_knn() -> None:
+    s = AsyncSearch()
+
+    with raises(TypeError):
+        s.knn()  # type: ignore[call-arg]
+    with raises(TypeError):
+        s.knn("field")  # type: ignore[call-arg]
+    with raises(TypeError):
+        s.knn("field", 5)  # type: ignore[call-arg]
+    with raises(ValueError):
+        s.knn("field", 5, 100)
+    with raises(ValueError):
+        s.knn("field", 5, 100, query_vector=[1, 2, 3], query_vector_builder={})
+
+    s = s.knn("field", 5, 100, query_vector=[1, 2, 3])
+    assert {
+        "knn": {
+            "field": "field",
+            "k": 5,
+            "num_candidates": 100,
+            "query_vector": [1, 2, 3],
+        }
+    } == s.to_dict()
+
+    s = s.knn(
+        k=4,
+        num_candidates=40,
+        boost=0.8,
+        field="name",
+        query_vector_builder={
+            "text_embedding": {"model_id": "foo", "model_text": "search text"}
+        },
+        inner_hits={"size": 1},
+    )
+    assert {
+        "knn": [
+            {
+                "field": "field",
+                "k": 5,
+                "num_candidates": 100,
+                "query_vector": [1, 2, 3],
+            },
+            {
+                "field": "name",
+                "k": 4,
+                "num_candidates": 40,
+                "query_vector_builder": {
+                    "text_embedding": {"model_id": "foo", "model_text": "search text"}
+                },
+                "boost": 0.8,
+                "inner_hits": {"size": 1},
+            },
+        ]
+    } == s.to_dict()
+
+
+def test_rank() -> None:
+    s = AsyncSearch()
+    s.rank(rrf=False)
+    assert {} == s.to_dict()
+
+    s = s.rank(rrf=True)
+    assert {"rank": {"rrf": {}}} == s.to_dict()
+
+    s = s.rank(rrf={"window_size": 50, "rank_constant": 20})
+    assert {"rank": {"rrf": {"window_size": 50, "rank_constant": 20}}} == s.to_dict()
+
+
+def test_sort() -> None:
+    s = AsyncSearch()
+    s = s.sort("fielda", "-fieldb")
+
+    assert ["fielda", {"fieldb": {"order": "desc"}}] == s._sort
+    assert {"sort": ["fielda", {"fieldb": {"order": "desc"}}]} == s.to_dict()
+
+    s = s.sort()
+    assert [] == s._sort
+    assert AsyncSearch().to_dict() == s.to_dict()
+
+
+def test_sort_by_score() -> None:
+    s = AsyncSearch()
+    s = s.sort("_score")
+    assert {"sort": ["_score"]} == s.to_dict()
+
+    s = AsyncSearch()
+    with raises(IllegalOperation):
+        s.sort("-_score")
+
+
+def test_collapse() -> None:
+    s = AsyncSearch()
+
+    inner_hits = {"name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}]}
+    s = s.collapse("user.id", inner_hits=inner_hits, max_concurrent_group_searches=4)
+
+    assert {
+        "field": "user.id",
+        "inner_hits": {
+            "name": "most_recent",
+            "size": 5,
+            "sort": [{"@timestamp": "desc"}],
+        },
+        "max_concurrent_group_searches": 4,
+    } == s._collapse
+    assert {
+        "collapse": {
+            "field": "user.id",
+            "inner_hits": {
+                "name": "most_recent",
+                "size": 5,
+                "sort": [{"@timestamp": "desc"}],
+            },
+            "max_concurrent_group_searches": 4,
+        }
+    } == s.to_dict()
+
+    s = s.collapse()
+    assert {} == s._collapse
+    assert AsyncSearch().to_dict() == s.to_dict()
+
+
+def test_slice() -> None:
+    s = AsyncSearch()
+    assert {"from": 3, "size": 7} == s[3:10].to_dict()
+    assert {"size": 5} == s[:5].to_dict()
+    assert {"from": 3} == s[3:].to_dict()
+    assert {"from": 0, "size": 0} == s[0:0].to_dict()
+    assert {"from": 20, "size": 0} == s[20:0].to_dict()
+    assert {"from": 10, "size": 5} == s[10:][:5].to_dict()
+    assert {"from": 10, "size": 0} == s[:5][10:].to_dict()
+    assert {"size": 10} == s[:10][:40].to_dict()
+    assert {"size": 10} == s[:40][:10].to_dict()
+    assert {"size": 40} == s[:40][:80].to_dict()
+    assert {"from": 12, "size": 0} == s[:5][10:][2:].to_dict()
+    assert {"from": 15, "size": 0} == s[10:][:5][5:].to_dict()
+    assert {} == s[:].to_dict()
+    with raises(ValueError):
+        s[-1:]
+    with raises(ValueError):
+        s[4:-1]
+    with raises(ValueError):
+        s[-3:-2]
+
+
+def test_index() -> None:
+    s = AsyncSearch()
+    assert {"from": 3, "size": 1} == s[3].to_dict()
+    assert {"from": 3, "size": 1} == s[3][0].to_dict()
+    assert {"from": 8, "size": 0} == s[3][5].to_dict()
+    assert {"from": 4, "size": 1} == s[3:10][1].to_dict()
+    with raises(ValueError):
+        s[-3]
+
+
+def test_search_to_dict() -> None:
+    s = AsyncSearch()
+    assert {} == s.to_dict()
+
+    s = s.query("match", f=42)
+    assert {"query": {"match": {"f": 42}}} == s.to_dict()
+
+    assert {"query": {"match": {"f": 42}}, "size": 10} == s.to_dict(size=10)
+
+    s.aggs.bucket("per_tag", "terms", field="f").metric(
+        "max_score", "max", field="score"
+    )
+    d = {
+        "aggs": {
+            "per_tag": {
+                "terms": {"field": "f"},
+                "aggs": {"max_score": {"max": {"field": "score"}}},
+            }
+        },
+        "query": {"match": {"f": 42}},
+    }
+    assert d == s.to_dict()
+
+    s = AsyncSearch(extra={"size": 5})
+    assert {"size": 5} == s.to_dict()
+    s = s.extra(from_=42)
+    assert {"size": 5, "from": 42} == s.to_dict()
+
+
+def test_complex_example() -> None:
+    s = AsyncSearch()
+    s = (
+        s.query("match", title="python")
+        .query(~Q("match", title="ruby"))
+        .filter(Q("term", category="meetup") | Q("term", category="conference"))
+        .collapse("user_id")
+        .post_filter("terms", tags=["prague", "czech"])
+        .script_fields(more_attendees="doc['attendees'].value + 42")
+    )
+
+    s.aggs.bucket("per_country", "terms", field="country").metric(
+        "avg_attendees", "avg", field="attendees"
+    )
+
+    s.query.minimum_should_match = 2
+
+    s = s.highlight_options(order="score").highlight("title", "body", fragment_size=50)
+
+    assert {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [{"match": {"title": "python"}}],
+                "must_not": [{"match": {"title": "ruby"}}],
+                "minimum_should_match": 2,
+            }
+        },
+        "post_filter": {"terms": {"tags": ["prague", "czech"]}},
+        "aggs": {
+            "per_country": {
+                "terms": {"field": "country"},
+                "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}},
+            }
+        },
+        "collapse": {"field": "user_id"},
+        "highlight": {
+            "order": "score",
+            "fields": {"title": {"fragment_size": 50}, "body": {"fragment_size": 50}},
+        },
+        "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}},
+    } == s.to_dict()
+
+
+def test_reverse() -> None:
+    d = {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [
+                    {
+                        "bool": {
+                            "must": [{"match": {"title": "python"}}],
+                            "must_not": [{"match": {"title": "ruby"}}],
+                            "minimum_should_match": 2,
+                        }
+                    }
+                ],
+            }
+        },
+        "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}},
+        "aggs": {
+            "per_country": {
+                "terms": {"field": "country"},
+                "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}},
+            }
+        },
+        "sort": ["title", {"category": {"order": "desc"}}, "_score"],
+        "size": 5,
+        "highlight": {"order": "score", "fields": {"title": {"fragment_size": 50}}},
+        "suggest": {
+            "my-title-suggestions-1": {
+                "text": "devloping distibutd saerch engies",
+                "term": {"size": 3, "field": "title"},
+            }
+        },
+        "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}},
+    }
+
+    d2 = deepcopy(d)
+
+    s = AsyncSearch.from_dict(d)
+
+    # make sure we haven't modified anything in place
+    assert d == d2
+    assert {"size": 5} == s._extra
+    assert d == s.to_dict()
+
+
+def test_code_generated_classes() -> None:
+    s = AsyncSearch()
+    s = (
+        s.query(query.Match("title", types.MatchQuery(query="python")))
+        .query(~query.Match("title", types.MatchQuery(query="ruby")))
+        .query(
+            query.Knn(
+                field="title",
+                query_vector=[1.0, 2.0, 3.0],
+                num_candidates=10,
+                k=3,
+                filter=query.Range("year", wrappers.Range(gt="2004")),
+            )
+        )
+        .filter(
+            query.Term("category", types.TermQuery(value="meetup"))
+            | query.Term("category", types.TermQuery(value="conference"))
+        )
+        .collapse("user_id")
+        .post_filter(query.Terms(tags=["prague", "czech"]))
+        .script_fields(more_attendees="doc['attendees'].value + 42")
+    )
+    assert {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": {"value": "meetup"}}},
+                                {"term": {"category": {"value": "conference"}}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [
+                    {"match": {"title": {"query": "python"}}},
+                    {
+                        "knn": {
+                            "field": "title",
+                            "filter": [
+                                {
+                                    "range": {
+                                        "year": {
+                                            "gt": "2004",
+                                        },
+                                    },
+                                },
+                            ],
+                            "k": 3,
+                            "num_candidates": 10,
+                            "query_vector": [
+                                1.0,
+                                2.0,
+                                3.0,
+                            ],
+                        },
+                    },
+                ],
+                "must_not": [{"match": {"title": {"query": "ruby"}}}],
+            }
+        },
+        "post_filter": {"terms": {"tags": ["prague", "czech"]}},
+        "collapse": {"field": "user_id"},
+        "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}},
+    } == s.to_dict()
+
+
+def test_from_dict_doesnt_need_query() -> None:
+    s = AsyncSearch.from_dict({"size": 5})
+
+    assert {"size": 5} == s.to_dict()
+
+
+@pytest.mark.anyio
+async def test_params_being_passed_to_search(async_mock_client: Any) -> None:
+    s = AsyncSearch(using="mock")
+    s = s.params(routing="42")
+    await s.execute()
+
+    async_mock_client.search.assert_awaited_once_with(index=None, body={}, routing="42")
+
+
+def test_source() -> None:
+    assert {} == AsyncSearch().source().to_dict()
+
+    assert {
+        "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]}
+    } == AsyncSearch().source(includes=["foo.bar.*"], excludes=("foo.one",)).to_dict()
+
+    assert {"_source": False} == AsyncSearch().source(False).to_dict()
+
+    assert {"_source": ["f1", "f2"]} == AsyncSearch().source(
+        includes=["foo.bar.*"], excludes=["foo.one"]
+    ).source(["f1", "f2"]).to_dict()
+
+
+def test_source_on_clone() -> None:
+    assert {
+        "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]},
+        "query": {"bool": {"filter": [{"term": {"title": "python"}}]}},
+    } == AsyncSearch().source(includes=["foo.bar.*"]).source(
+        excludes=["foo.one"]
+    ).filter(
+        "term", title="python"
+    ).to_dict()
+    assert {
+        "_source": False,
+        "query": {"bool": {"filter": [{"term": {"title": "python"}}]}},
+    } == AsyncSearch().source(False).filter("term", title="python").to_dict()
+
+
+def test_source_on_clear() -> None:
+    assert (
+        {}
+        == AsyncSearch()
+        .source(includes=["foo.bar.*"])
+        .source(includes=None, excludes=None)
+        .to_dict()
+    )
+
+
+def test_suggest_accepts_global_text() -> None:
+    s = AsyncSearch.from_dict(
+        {
+            "suggest": {
+                "text": "the amsterdma meetpu",
+                "my-suggest-1": {"term": {"field": "title"}},
+                "my-suggest-2": {"text": "other", "term": {"field": "body"}},
+            }
+        }
+    )
+
+    assert {
+        "suggest": {
+            "my-suggest-1": {
+                "term": {"field": "title"},
+                "text": "the amsterdma meetpu",
+            },
+            "my-suggest-2": {"term": {"field": "body"}, "text": "other"},
+        }
+    } == s.to_dict()
+
+
+def test_suggest() -> None:
+    s = AsyncSearch()
+    s = s.suggest("my_suggestion", "pyhton", term={"field": "title"})
+
+    assert {
+        "suggest": {"my_suggestion": {"term": {"field": "title"}, "text": "pyhton"}}
+    } == s.to_dict()
+
+
+def test_exclude() -> None:
+    s = AsyncSearch()
+    s = s.exclude("match", title="python")
+
+    assert {
+        "query": {
+            "bool": {
+                "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}]
+            }
+        }
+    } == s.to_dict()
+
+
+@pytest.mark.anyio
+async def test_delete_by_query(async_mock_client: Any) -> None:
+    s = AsyncSearch(using="mock", index="i").query("match", lang="java")
+    await s.delete()
+
+    async_mock_client.delete_by_query.assert_awaited_once_with(
+        index=["i"], body={"query": {"match": {"lang": "java"}}}
+    )
+
+
+def test_update_from_dict() -> None:
+    s = AsyncSearch()
+    s.update_from_dict({"indices_boost": [{"important-documents": 2}]})
+    s.update_from_dict({"_source": ["id", "name"]})
+    s.update_from_dict({"collapse": {"field": "user_id"}})
+
+    assert {
+        "indices_boost": [{"important-documents": 2}],
+        "_source": ["id", "name"],
+        "collapse": {"field": "user_id"},
+    } == s.to_dict()
+
+
+def test_rescore_query_to_dict() -> None:
+    s = AsyncSearch(index="index-name")
+
+    positive_query = Q(
+        "function_score",
+        query=Q("term", tags="a"),
+        script_score={"script": "_score * 1"},
+    )
+
+    negative_query = Q(
+        "function_score",
+        query=Q("term", tags="b"),
+        script_score={"script": "_score * -100"},
+    )
+
+    s = s.query(positive_query)
+    s = s.extra(
+        rescore={"window_size": 100, "query": {"rescore_query": negative_query}}
+    )
+    assert s.to_dict() == {
+        "query": {
+            "function_score": {
+                "query": {"term": {"tags": "a"}},
+                "functions": [{"script_score": {"script": "_score * 1"}}],
+            }
+        },
+        "rescore": {
+            "window_size": 100,
+            "query": {
+                "rescore_query": {
+                    "function_score": {
+                        "query": {"term": {"tags": "b"}},
+                        "functions": [{"script_score": {"script": "_score * -100"}}],
+                    }
+                }
+            },
+        },
+    }
+
+    assert s.to_dict(
+        rescore={"window_size": 10, "query": {"rescore_query": positive_query}}
+    ) == {
+        "query": {
+            "function_score": {
+                "query": {"term": {"tags": "a"}},
+                "functions": [{"script_score": {"script": "_score * 1"}}],
+            }
+        },
+        "rescore": {
+            "window_size": 10,
+            "query": {
+                "rescore_query": {
+                    "function_score": {
+                        "query": {"term": {"tags": "a"}},
+                        "functions": [{"script_score": {"script": "_score * 1"}}],
+                    }
+                }
+            },
+        },
+    }
+
+
+@pytest.mark.anyio
+async def test_empty_search() -> None:
+    s = AsyncEmptySearch(index="index-name")
+    s = s.query("match", lang="java")
+    s.aggs.bucket("versions", "terms", field="version")
+
+    assert await s.count() == 0
+    assert [hit async for hit in s] == []
+    assert [hit async for hit in s.scan()] == []
+    await s.delete()  # should not error
+
+
+def test_suggest_completion() -> None:
+    s = AsyncSearch()
+    s = s.suggest("my_suggestion", "pyhton", completion={"field": "title"})
+
+    assert {
+        "suggest": {
+            "my_suggestion": {"completion": {"field": "title"}, "prefix": "pyhton"}
+        }
+    } == s.to_dict()
+
+
+def test_suggest_regex_query() -> None:
+    s = AsyncSearch()
+    s = s.suggest("my_suggestion", regex="py[thon|py]", completion={"field": "title"})
+
+    assert {
+        "suggest": {
+            "my_suggestion": {"completion": {"field": "title"}, "regex": "py[thon|py]"}
+        }
+    } == s.to_dict()
+
+
+def test_suggest_must_pass_text_or_regex() -> None:
+    s = AsyncSearch()
+    with raises(ValueError):
+        s.suggest("my_suggestion")
+
+
+def test_suggest_can_only_pass_text_or_regex() -> None:
+    s = AsyncSearch()
+    with raises(ValueError):
+        s.suggest("my_suggestion", text="python", regex="py[hton|py]")
+
+
+def test_suggest_regex_must_be_with_completion() -> None:
+    s = AsyncSearch()
+    with raises(ValueError):
+        s.suggest("my_suggestion", regex="py[thon|py]")
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_async/test_update_by_query.py 9.2.0-1/test_elasticsearch/test_dsl/_async/test_update_by_query.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_async/test_update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_async/test_update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,180 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from copy import deepcopy
+from typing import Any
+
+import pytest
+
+from elasticsearch.dsl import AsyncUpdateByQuery, Q
+from elasticsearch.dsl.response import UpdateByQueryResponse
+from elasticsearch.dsl.search_base import SearchBase
+
+
+def test_ubq_starts_with_no_query() -> None:
+    ubq = AsyncUpdateByQuery()
+
+    assert ubq.query._proxied is None
+
+
+def test_ubq_to_dict() -> None:
+    ubq = AsyncUpdateByQuery()
+    assert {} == ubq.to_dict()
+
+    ubq = ubq.query("match", f=42)
+    assert {"query": {"match": {"f": 42}}} == ubq.to_dict()
+
+    assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10)
+
+    ubq = AsyncUpdateByQuery(extra={"size": 5})
+    assert {"size": 5} == ubq.to_dict()
+
+    ubq = AsyncUpdateByQuery(extra={"extra_q": Q("term", category="conference")})
+    assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict()
+
+
+def test_complex_example() -> None:
+    ubq = AsyncUpdateByQuery()
+    ubq = (
+        ubq.query("match", title="python")
+        .query(~Q("match", title="ruby"))
+        .filter(Q("term", category="meetup") | Q("term", category="conference"))
+        .script(
+            source="ctx._source.likes += params.f", lang="painless", params={"f": 3}
+        )
+    )
+
+    ubq.query.minimum_should_match = 2
+    assert {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [{"match": {"title": "python"}}],
+                "must_not": [{"match": {"title": "ruby"}}],
+                "minimum_should_match": 2,
+            }
+        },
+        "script": {
+            "source": "ctx._source.likes += params.f",
+            "lang": "painless",
+            "params": {"f": 3},
+        },
+    } == ubq.to_dict()
+
+
+def test_exclude() -> None:
+    ubq = AsyncUpdateByQuery()
+    ubq = ubq.exclude("match", title="python")
+
+    assert {
+        "query": {
+            "bool": {
+                "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}]
+            }
+        }
+    } == ubq.to_dict()
+
+
+def test_reverse() -> None:
+    d = {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [
+                    {
+                        "bool": {
+                            "must": [{"match": {"title": "python"}}],
+                            "must_not": [{"match": {"title": "ruby"}}],
+                            "minimum_should_match": 2,
+                        }
+                    }
+                ],
+            }
+        },
+        "script": {
+            "source": "ctx._source.likes += params.f",
+            "lang": "painless",
+            "params": {"f": 3},
+        },
+    }
+
+    d2 = deepcopy(d)
+
+    ubq = AsyncUpdateByQuery.from_dict(d)
+
+    assert d == d2
+    assert d == ubq.to_dict()
+
+
+def test_from_dict_doesnt_need_query() -> None:
+    ubq = AsyncUpdateByQuery.from_dict({"script": {"source": "test"}})
+
+    assert {"script": {"source": "test"}} == ubq.to_dict()
+
+
+@pytest.mark.anyio
+async def test_params_being_passed_to_search(async_mock_client: Any) -> None:
+    ubq = AsyncUpdateByQuery(using="mock", index="i")
+    ubq = ubq.params(routing="42")
+    await ubq.execute()
+
+    async_mock_client.update_by_query.assert_called_once_with(index=["i"], routing="42")
+
+
+def test_overwrite_script() -> None:
+    ubq = AsyncUpdateByQuery()
+    ubq = ubq.script(
+        source="ctx._source.likes += params.f", lang="painless", params={"f": 3}
+    )
+    assert {
+        "script": {
+            "source": "ctx._source.likes += params.f",
+            "lang": "painless",
+            "params": {"f": 3},
+        }
+    } == ubq.to_dict()
+    ubq = ubq.script(source="ctx._source.likes++")
+    assert {"script": {"source": "ctx._source.likes++"}} == ubq.to_dict()
+
+
+def test_update_by_query_response_success() -> None:
+    ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": []})
+    assert ubqr.success()
+
+    ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": True, "failures": []})
+    assert not ubqr.success()
+
+    ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": [{}]})
+    assert not ubqr.success()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_document.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_document.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1021 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+# This file creates several documents using bad or no types because
+# these are still supported and should be kept functional in spite
+# of not having appropriate type hints. For that reason, the comment
+# below disables the many mypy checks that fail as a result.
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined"
+
+import codecs
+import ipaddress
+import pickle
+import sys
+from datetime import datetime
+from hashlib import md5
+from typing import Annotated, Any, ClassVar, Dict, List, Optional
+
+import pydantic
+import pytest
+from pytest import raises
+
+from elasticsearch.dsl import (
+    Document,
+    Index,
+    InnerDoc,
+    Keyword,
+    M,
+    Mapping,
+    MetaField,
+    Range,
+    analyzer,
+    field,
+    mapped_field,
+    utils,
+)
+from elasticsearch.dsl.document_base import InstrumentedField
+from elasticsearch.dsl.exceptions import IllegalOperation, ValidationException
+from elasticsearch.dsl.pydantic import BaseESModel
+
+
+class MyInner(InnerDoc):
+    old_field = field.Text()
+
+
+class MyDoc(Document):
+    title = field.Keyword()
+    name = field.Text()
+    created_at = field.Date()
+    inner = field.Object(MyInner)
+
+
+class MySubDoc(MyDoc):
+    name = field.Keyword()
+
+    class Index:
+        name = "default-index"
+
+
+class MyDoc2(Document):
+    extra = field.Long()
+
+
+class MyMultiSubDoc(MyDoc2, MySubDoc):
+    pass
+
+
+class Comment(InnerDoc):
+    title = field.Text()
+    tags = field.Keyword(multi=True)
+
+
+class DocWithNested(Document):
+    comments = field.Nested(Comment)
+
+    class Index:
+        name = "test-doc-with-nested"
+
+
+class SimpleCommit(Document):
+    files = field.Text(multi=True)
+
+    class Index:
+        name = "test-git"
+
+
+class Secret(str):
+    pass
+
+
+class SecretField(field.CustomField):
+    builtin_type = "text"
+
+    def _serialize(self, data: Any) -> Any:
+        return codecs.encode(data, "rot_13")
+
+    def _deserialize(self, data: Any) -> Any:
+        if isinstance(data, Secret):
+            return data
+        return Secret(codecs.decode(data, "rot_13"))
+
+
+class SecretDoc(Document):
+    title = SecretField(index="no")
+
+    class Index:
+        name = "test-secret-doc"
+
+
+class NestedSecret(Document):
+    secrets = field.Nested(SecretDoc)
+
+    class Index:
+        name = "test-nested-secret"
+
+
+class OptionalObjectWithRequiredField(Document):
+    comments = field.Nested(properties={"title": field.Keyword(required=True)})
+
+    class Index:
+        name = "test-required"
+
+
+class Host(Document):
+    ip = field.Ip()
+
+    class Index:
+        name = "test-host"
+
+
+def test_range_serializes_properly() -> None:
+    class D(Document):
+        lr: Range[int] = field.LongRange()
+
+    d = D(lr=Range(lt=42))
+    assert 40 in d.lr
+    assert 47 not in d.lr
+    assert {"lr": {"lt": 42}} == d.to_dict()
+
+    d = D(lr={"lt": 42})
+    assert {"lr": {"lt": 42}} == d.to_dict()
+
+
+def test_range_deserializes_properly() -> None:
+    class D(InnerDoc):
+        lr = field.LongRange()
+
+    d = D.from_es({"lr": {"lt": 42}}, True)
+    assert isinstance(d.lr, Range)
+    assert 40 in d.lr
+    assert 47 not in d.lr
+
+
+def test_resolve_nested() -> None:
+    nested, field = NestedSecret._index.resolve_nested("secrets.title")
+    assert nested == ["secrets"]
+    assert field is NestedSecret._doc_type.mapping["secrets"]["title"]
+
+
+def test_conflicting_mapping_raises_error_in_index_to_dict() -> None:
+    class A(Document):
+        name = field.Text()
+
+    class B(Document):
+        name = field.Keyword()
+
+    i = Index("i")
+    i.document(A)
+    i.document(B)
+
+    with raises(ValueError):
+        i.to_dict()
+
+
+def test_ip_address_serializes_properly() -> None:
+    host = Host(ip=ipaddress.IPv4Address("10.0.0.1"))
+
+    assert {"ip": "10.0.0.1"} == host.to_dict()
+
+
+def test_matches_uses_index() -> None:
+    assert SimpleCommit._matches({"_index": "test-git"})
+    assert not SimpleCommit._matches({"_index": "not-test-git"})
+
+
+def test_matches_with_no_name_always_matches() -> None:
+    class D(Document):
+        pass
+
+    assert D._matches({})
+    assert D._matches({"_index": "whatever"})
+
+
+def test_matches_accepts_wildcards() -> None:
+    class MyDoc(Document):
+        class Index:
+            name = "my-*"
+
+    assert MyDoc._matches({"_index": "my-index"})
+    assert not MyDoc._matches({"_index": "not-my-index"})
+
+
+def test_assigning_attrlist_to_field() -> None:
+    sc = SimpleCommit()
+    l = ["README", "README.rst"]
+    sc.files = utils.AttrList(l)
+
+    assert sc.to_dict()["files"] is l
+
+
+def test_optional_inner_objects_are_not_validated_if_missing() -> None:
+    d = OptionalObjectWithRequiredField()
+
+    d.full_clean()
+
+
+def test_custom_field() -> None:
+    s = SecretDoc(title=Secret("Hello"))
+
+    assert {"title": "Uryyb"} == s.to_dict()
+    assert s.title == "Hello"
+
+    s = SecretDoc.from_es({"_source": {"title": "Uryyb"}})
+    assert s.title == "Hello"
+    assert isinstance(s.title, Secret)
+
+
+def test_custom_field_mapping() -> None:
+    assert {
+        "properties": {"title": {"index": "no", "type": "text"}}
+    } == SecretDoc._doc_type.mapping.to_dict()
+
+
+def test_custom_field_in_nested() -> None:
+    s = NestedSecret()
+    s.secrets.append(SecretDoc(title=Secret("Hello")))
+
+    assert {"secrets": [{"title": "Uryyb"}]} == s.to_dict()
+    assert s.secrets[0].title == "Hello"
+
+
+def test_multi_works_after_doc_has_been_saved() -> None:
+    c = SimpleCommit()
+    c.full_clean()
+    c.files.append("setup.py")
+
+    assert c.to_dict() == {"files": ["setup.py"]}
+
+
+def test_multi_works_in_nested_after_doc_has_been_serialized() -> None:
+    # Issue #359
+    c = DocWithNested(comments=[Comment(title="First!")])
+
+    assert [] == c.comments[0].tags
+    assert {"comments": [{"title": "First!"}]} == c.to_dict()
+    assert [] == c.comments[0].tags
+
+
+def test_null_value_for_object() -> None:
+    d = MyDoc(inner=None)
+
+    assert d.inner is None
+
+
+def test_inherited_doc_types_can_override_index() -> None:
+    class MyDocDifferentIndex(MySubDoc):
+        class Index:
+            name = "not-default-index"
+            settings = {"number_of_replicas": 0}
+            aliases: Dict[str, Any] = {"a": {}}
+            analyzers = [analyzer("my_analizer", tokenizer="keyword")]
+
+    assert MyDocDifferentIndex._index._name == "not-default-index"
+    assert MyDocDifferentIndex()._get_index() == "not-default-index"
+    assert MyDocDifferentIndex._index.to_dict() == {
+        "aliases": {"a": {}},
+        "mappings": {
+            "properties": {
+                "created_at": {"type": "date"},
+                "inner": {
+                    "type": "object",
+                    "properties": {"old_field": {"type": "text"}},
+                },
+                "name": {"type": "keyword"},
+                "title": {"type": "keyword"},
+            }
+        },
+        "settings": {
+            "analysis": {
+                "analyzer": {"my_analizer": {"tokenizer": "keyword", "type": "custom"}}
+            },
+            "number_of_replicas": 0,
+        },
+    }
+
+
+def test_to_dict_with_meta() -> None:
+    d = MySubDoc(title="hello")
+    d.meta.routing = "some-parent"
+
+    assert {
+        "_index": "default-index",
+        "_routing": "some-parent",
+        "_source": {"title": "hello"},
+    } == d.to_dict(True)
+
+
+def test_to_dict_with_meta_includes_custom_index() -> None:
+    d = MySubDoc(title="hello")
+    d.meta.index = "other-index"
+
+    assert {"_index": "other-index", "_source": {"title": "hello"}} == d.to_dict(True)
+
+
+def test_to_dict_without_skip_empty_will_include_empty_fields() -> None:
+    d = MySubDoc(tags=[], title=None, inner={})
+
+    assert {} == d.to_dict()
+    assert {"tags": [], "title": None, "inner": {}} == d.to_dict(skip_empty=False)
+
+
+def test_attribute_can_be_removed() -> None:
+    d = MyDoc(title="hello")
+
+    del d.title
+    assert "title" not in d._d_
+
+
+def test_doc_type_can_be_correctly_pickled() -> None:
+    d = DocWithNested(
+        title="Hello World!", comments=[Comment(title="hellp")], meta={"id": 42}
+    )
+    s = pickle.dumps(d)
+
+    d2 = pickle.loads(s)
+
+    assert d2 == d
+    assert 42 == d2.meta.id
+    assert "Hello World!" == d2.title
+    assert [{"title": "hellp"}] == d2.comments
+    assert isinstance(d2.comments[0], Comment)
+
+
+def test_meta_is_accessible_even_on_empty_doc() -> None:
+    d = MyDoc()
+    d.meta
+
+    d = MyDoc(title="aaa")
+    d.meta
+
+
+def test_meta_field_mapping() -> None:
+    class User(Document):
+        username = field.Text()
+
+        class Meta:
+            all = MetaField(enabled=False)
+            _index = MetaField(enabled=True)
+            dynamic = MetaField("strict")
+            dynamic_templates = MetaField([42])
+
+    assert {
+        "properties": {"username": {"type": "text"}},
+        "_all": {"enabled": False},
+        "_index": {"enabled": True},
+        "dynamic": "strict",
+        "dynamic_templates": [42],
+    } == User._doc_type.mapping.to_dict()
+
+
+def test_multi_value_fields() -> None:
+    class Blog(Document):
+        tags = field.Keyword(multi=True)
+
+    b = Blog()
+    assert [] == b.tags
+    b.tags.append("search")
+    b.tags.append("python")
+    assert ["search", "python"] == b.tags
+
+
+def test_docs_with_properties() -> None:
+    class User(Document):
+        pwd_hash: str = field.Text()
+
+        def check_password(self, pwd: bytes) -> bool:
+            return md5(pwd).hexdigest() == self.pwd_hash
+
+        @property
+        def password(self) -> None:
+            raise AttributeError("readonly")
+
+        @password.setter
+        def password(self, pwd: bytes) -> None:
+            self.pwd_hash = md5(pwd).hexdigest()
+
+    u = User(pwd_hash=md5(b"secret").hexdigest())
+    assert u.check_password(b"secret")
+    assert not u.check_password(b"not-secret")
+
+    u.password = b"not-secret"
+    assert "password" not in u._d_
+    assert not u.check_password(b"secret")
+    assert u.check_password(b"not-secret")
+
+    with raises(AttributeError):
+        u.password
+
+
+def test_nested_can_be_assigned_to() -> None:
+    d1 = DocWithNested(comments=[Comment(title="First!")])
+    d2 = DocWithNested()
+
+    d2.comments = d1.comments
+    assert isinstance(d1.comments[0], Comment)
+    assert d2.comments == [{"title": "First!"}]
+    assert {"comments": [{"title": "First!"}]} == d2.to_dict()
+    assert isinstance(d2.comments[0], Comment)
+
+
+def test_nested_can_be_none() -> None:
+    d = DocWithNested(comments=None, title="Hello World!")
+
+    assert {"title": "Hello World!"} == d.to_dict()
+
+
+def test_nested_defaults_to_list_and_can_be_updated() -> None:
+    md = DocWithNested()
+
+    assert [] == md.comments
+
+    md.comments.append({"title": "hello World!"})
+    assert {"comments": [{"title": "hello World!"}]} == md.to_dict()
+
+
+def test_to_dict_is_recursive_and_can_cope_with_multi_values() -> None:
+    md = MyDoc(name=["a", "b", "c"])
+    md.inner = [MyInner(old_field="of1"), MyInner(old_field="of2")]
+
+    assert isinstance(md.inner[0], MyInner)
+
+    assert {
+        "name": ["a", "b", "c"],
+        "inner": [{"old_field": "of1"}, {"old_field": "of2"}],
+    } == md.to_dict()
+
+
+def test_to_dict_ignores_empty_collections() -> None:
+    md = MySubDoc(name="", address={}, count=0, valid=False, tags=[])
+
+    assert {"name": "", "count": 0, "valid": False} == md.to_dict()
+
+
+def test_declarative_mapping_definition() -> None:
+    assert issubclass(MyDoc, Document)
+    assert hasattr(MyDoc, "_doc_type")
+    assert {
+        "properties": {
+            "created_at": {"type": "date"},
+            "name": {"type": "text"},
+            "title": {"type": "keyword"},
+            "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}},
+        }
+    } == MyDoc._doc_type.mapping.to_dict()
+
+
+def test_you_can_supply_own_mapping_instance() -> None:
+    class MyD(Document):
+        title = field.Text()
+
+        class Meta:
+            mapping = Mapping()
+            mapping.meta("_all", enabled=False)
+
+    assert {
+        "_all": {"enabled": False},
+        "properties": {"title": {"type": "text"}},
+    } == MyD._doc_type.mapping.to_dict()
+
+
+def test_document_can_be_created_dynamically() -> None:
+    n = datetime.now()
+    md = MyDoc(title="hello")
+    md.name = "My Fancy Document!"
+    md.created_at = n
+
+    inner = md.inner
+    # consistent returns
+    assert inner is md.inner
+    inner.old_field = "Already defined."
+
+    md.inner.new_field = ["undefined", "field"]
+
+    assert {
+        "title": "hello",
+        "name": "My Fancy Document!",
+        "created_at": n,
+        "inner": {"old_field": "Already defined.", "new_field": ["undefined", "field"]},
+    } == md.to_dict()
+
+
+def test_invalid_date_will_raise_exception() -> None:
+    md = MyDoc()
+    md.created_at = "not-a-date"
+    with raises(ValidationException):
+        md.full_clean()
+
+
+def test_document_inheritance() -> None:
+    assert issubclass(MySubDoc, MyDoc)
+    assert issubclass(MySubDoc, Document)
+    assert hasattr(MySubDoc, "_doc_type")
+    assert {
+        "properties": {
+            "created_at": {"type": "date"},
+            "name": {"type": "keyword"},
+            "title": {"type": "keyword"},
+            "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}},
+        }
+    } == MySubDoc._doc_type.mapping.to_dict()
+
+
+def test_child_class_can_override_parent() -> None:
+    class A(Document):
+        o = field.Object(dynamic=False, properties={"a": field.Text()})
+
+    class B(A):
+        o = field.Object(dynamic="strict", properties={"b": field.Text()})
+
+    assert {
+        "properties": {
+            "o": {
+                "dynamic": "strict",
+                "properties": {"a": {"type": "text"}, "b": {"type": "text"}},
+                "type": "object",
+            }
+        }
+    } == B._doc_type.mapping.to_dict()
+
+
+def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict() -> None:
+    md = MySubDoc(meta={"id": 42}, name="My First doc!")
+
+    md.meta.index = "my-index"
+    assert md.meta.index == "my-index"
+    assert md.meta.id == 42
+    assert {"name": "My First doc!"} == md.to_dict()
+    assert {"id": 42, "index": "my-index"} == md.meta.to_dict()
+
+
+def test_index_inheritance() -> None:
+    assert issubclass(MyMultiSubDoc, MySubDoc)
+    assert issubclass(MyMultiSubDoc, MyDoc2)
+    assert issubclass(MyMultiSubDoc, Document)
+    assert hasattr(MyMultiSubDoc, "_doc_type")
+    assert hasattr(MyMultiSubDoc, "_index")
+    assert {
+        "properties": {
+            "created_at": {"type": "date"},
+            "name": {"type": "keyword"},
+            "title": {"type": "keyword"},
+            "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}},
+            "extra": {"type": "long"},
+        }
+    } == MyMultiSubDoc._doc_type.mapping.to_dict()
+
+
+def test_meta_fields_can_be_set_directly_in_init() -> None:
+    p = object()
+    md = MyDoc(_id=p, title="Hello World!")
+
+    assert md.meta.id is p
+
+
+@pytest.mark.sync
+def test_save_no_index(mock_client: Any) -> None:
+    md = MyDoc()
+    with raises(ValidationException):
+        md.save(using="mock")
+
+
+@pytest.mark.sync
+def test_delete_no_index(mock_client: Any) -> None:
+    md = MyDoc()
+    with raises(ValidationException):
+        md.delete(using="mock")
+
+
+@pytest.mark.sync
+def test_update_no_fields() -> None:
+    md = MyDoc()
+    with raises(IllegalOperation):
+        md.update()
+
+
+def test_search_with_custom_alias_and_index() -> None:
+    search_object = MyDoc.search(
+        using="staging", index=["custom_index1", "custom_index2"]
+    )
+
+    assert search_object._using == "staging"
+    assert search_object._index == ["custom_index1", "custom_index2"]
+
+
+def test_from_es_respects_underscored_non_meta_fields() -> None:
+    doc = {
+        "_index": "test-index",
+        "_id": "elasticsearch",
+        "_score": 12.0,
+        "fields": {"hello": "world", "_routing": "es", "_tags": ["search"]},
+        "_source": {
+            "city": "Amsterdam",
+            "name": "Elasticsearch",
+            "_tagline": "You know, for search",
+        },
+    }
+
+    class Company(Document):
+        class Index:
+            name = "test-company"
+
+    c = Company.from_es(doc)
+
+    assert c.meta.fields._tags == ["search"]
+    assert c.meta.fields._routing == "es"
+    assert c._tagline == "You know, for search"
+
+
+def test_nested_and_object_inner_doc() -> None:
+    class MySubDocWithNested(MyDoc):
+        nested_inner = field.Nested(MyInner)
+
+    props = MySubDocWithNested._doc_type.mapping.to_dict()["properties"]
+    assert props == {
+        "created_at": {"type": "date"},
+        "inner": {"properties": {"old_field": {"type": "text"}}, "type": "object"},
+        "name": {"type": "text"},
+        "nested_inner": {
+            "properties": {"old_field": {"type": "text"}},
+            "type": "nested",
+        },
+        "title": {"type": "keyword"},
+    }
+
+
+def test_doc_with_type_hints() -> None:
+    class TypedInnerDoc(InnerDoc):
+        st: M[str]
+        dt: M[Optional[datetime]]
+        li: M[List[int]]
+
+    class TypedDoc(Document):
+        st: str
+        dt: Optional[datetime]
+        li: List[int]
+        ob: TypedInnerDoc
+        ns: List[TypedInnerDoc]
+        ip: Optional[str] = field.Ip()
+        k1: str = field.Keyword(required=True)
+        k2: M[str] = field.Keyword()
+        k3: str = mapped_field(field.Keyword(), default="foo")
+        k4: M[Optional[str]] = mapped_field(field.Keyword())  # type: ignore[misc]
+        s1: Secret = SecretField()
+        s2: M[Secret] = SecretField()
+        s3: Secret = mapped_field(SecretField())  # type: ignore[misc]
+        s4: M[Optional[Secret]] = mapped_field(
+            SecretField(), default_factory=lambda: "foo"
+        )
+        i1: ClassVar
+        i2: ClassVar[int]
+        i3: str = mapped_field(exclude=True)
+
+    class TypedDocAnnotated(Document):
+        st: Annotated[str, "foo"]
+        dt: Annotated[Optional[datetime], "bar"]
+        li: Annotated[List[int], "baz"]
+        ob: Annotated[TypedInnerDoc, "qux"]
+        ns: Annotated[List[TypedInnerDoc], "quux"]
+        ip: Annotated[Optional[str], field.Ip()]
+        k1: Annotated[str, field.Keyword(required=True)]
+        k2: Annotated[M[str], field.Keyword()]
+        k3: Annotated[str, mapped_field(field.Keyword(), default="foo")]
+        k4: Annotated[M[Optional[str]], mapped_field(field.Keyword())]  # type: ignore[misc]
+        s1: Annotated[Secret, SecretField()]
+        s2: Annotated[M[Secret], SecretField()]
+        s3: Annotated[Secret, mapped_field(SecretField())]  # type: ignore[misc]
+        s4: Annotated[
+            M[Optional[Secret]],
+            mapped_field(SecretField(), default_factory=lambda: "foo"),
+        ]
+        if sys.version_info[0] > 3 or (
+            sys.version_info[0] == 3 and sys.version_info[1] >= 11
+        ):
+            i1: Annotated[ClassVar, "classvar"]
+            i2: Annotated[ClassVar[int], "classvar"]
+        else:
+            i1: ClassVar
+            i2: ClassVar[int]
+        i3: Annotated[str, mapped_field(exclude=True)]
+
+    for doc_class in [TypedDoc, TypedDocAnnotated]:
+        props = doc_class._doc_type.mapping.to_dict()["properties"]
+        assert props == {
+            "st": {"type": "text"},
+            "dt": {"type": "date"},
+            "li": {"type": "integer"},
+            "ob": {
+                "type": "object",
+                "properties": {
+                    "st": {"type": "text"},
+                    "dt": {"type": "date"},
+                    "li": {"type": "integer"},
+                },
+            },
+            "ns": {
+                "type": "nested",
+                "properties": {
+                    "st": {"type": "text"},
+                    "dt": {"type": "date"},
+                    "li": {"type": "integer"},
+                },
+            },
+            "ip": {"type": "ip"},
+            "k1": {"type": "keyword"},
+            "k2": {"type": "keyword"},
+            "k3": {"type": "keyword"},
+            "k4": {"type": "keyword"},
+            "s1": {"type": "text"},
+            "s2": {"type": "text"},
+            "s3": {"type": "text"},
+            "s4": {"type": "text"},
+        }
+
+        doc_class.i1 = "foo"
+        doc_class.i2 = 123
+        doc_class.i3 = "bar"
+
+        doc = doc_class()
+        assert doc.k3 == "foo"
+        assert doc.s4 == "foo"
+        with raises(ValidationException) as exc_info:
+            doc.full_clean()
+        assert set(exc_info.value.args[0].keys()) == {
+            "st",
+            "k1",
+            "k2",
+            "ob",
+            "s1",
+            "s2",
+            "s3",
+        }
+
+        assert doc_class.i1 == "foo"
+        assert doc_class.i2 == 123
+        assert doc_class.i3 == "bar"
+
+        doc.st = "s"
+        doc.li = [1, 2, 3]
+        doc.k1 = "k1"
+        doc.k2 = "k2"
+        doc.ob.st = "s"
+        doc.ob.li = [1]
+        doc.s1 = "s1"
+        doc.s2 = "s2"
+        doc.s3 = "s3"
+        doc.full_clean()
+
+        doc.ob = TypedInnerDoc(li=[1])
+        with raises(ValidationException) as exc_info:
+            doc.full_clean()
+        assert set(exc_info.value.args[0].keys()) == {"ob"}
+        assert set(exc_info.value.args[0]["ob"][0].args[0].keys()) == {"st"}
+
+        doc.ob.st = "s"
+        doc.ns.append(TypedInnerDoc(li=[1, 2]))
+        with raises(ValidationException) as exc_info:
+            doc.full_clean()
+
+        doc.ns[0].st = "s"
+        doc.full_clean()
+
+        doc.ip = "1.2.3.4"
+        n = datetime.now()
+        doc.dt = n
+        assert doc.to_dict() == {
+            "st": "s",
+            "li": [1, 2, 3],
+            "dt": n,
+            "ob": {
+                "st": "s",
+                "li": [1],
+            },
+            "ns": [
+                {
+                    "st": "s",
+                    "li": [1, 2],
+                }
+            ],
+            "ip": "1.2.3.4",
+            "k1": "k1",
+            "k2": "k2",
+            "k3": "foo",
+            "s1": "s1",
+            "s2": "s2",
+            "s3": "s3",
+            "s4": "foo",
+        }
+
+        s = doc_class.search().sort(doc_class.st, -doc_class.dt, +doc_class.ob.st)
+        s.aggs.bucket("terms_agg", "terms", field=doc_class.k1)
+        d = s.to_dict()
+        assert d["aggs"] == {"terms_agg": {"terms": {"field": "k1"}}}
+        assert d["sort"] == ["st", {"dt": {"order": "desc"}}, "ob.st"]
+
+
+@pytest.mark.skipif(sys.version_info < (3, 10), reason="requires Python 3.10")
+def test_doc_with_pipe_type_hints() -> None:
+    with pytest.raises(TypeError):
+
+        class BadlyTypedDoc(Document):
+            s: str
+            f: str | int | None  # type: ignore[syntax]
+
+    class TypedDoc(Document):
+        s: str
+        f1: str | None  # type: ignore[syntax]
+        f2: M[int | None]  # type: ignore[syntax]
+        f3: M[datetime | None]  # type: ignore[syntax]
+
+    props = TypedDoc._doc_type.mapping.to_dict()["properties"]
+    assert props == {
+        "s": {"type": "text"},
+        "f1": {"type": "text"},
+        "f2": {"type": "integer"},
+        "f3": {"type": "date"},
+    }
+
+    doc = TypedDoc()
+    with raises(ValidationException) as exc_info:
+        doc.full_clean()
+    assert set(exc_info.value.args[0].keys()) == {"s"}
+    doc.s = "s"
+    doc.full_clean()
+
+
+def test_instrumented_field() -> None:
+    class Child(InnerDoc):
+        st: M[str]
+
+    class Doc(Document):
+        st: str
+        ob: Child
+        ns: List[Child]
+
+    doc = Doc(
+        st="foo",
+        ob=Child(st="bar"),
+        ns=[
+            Child(st="baz"),
+            Child(st="qux"),
+        ],
+    )
+
+    assert type(doc.st) is str
+    assert doc.st == "foo"
+
+    assert type(doc.ob) is Child
+    assert doc.ob.st == "bar"
+
+    assert type(doc.ns) is utils.AttrList
+    assert doc.ns[0].st == "baz"
+    assert doc.ns[1].st == "qux"
+    assert type(doc.ns[0]) is Child
+    assert type(doc.ns[1]) is Child
+
+    assert type(Doc.st) is InstrumentedField
+    assert str(Doc.st) == "st"
+    assert +Doc.st == "st"
+    assert -Doc.st == "-st"
+    assert Doc.st.to_dict() == {"type": "text"}
+    with raises(AttributeError):
+        Doc.st.something
+
+    assert type(Doc.ob) is InstrumentedField
+    assert str(Doc.ob) == "ob"
+    assert str(Doc.ob.st) == "ob.st"
+    assert +Doc.ob.st == "ob.st"
+    assert -Doc.ob.st == "-ob.st"
+    assert Doc.ob.st.to_dict() == {"type": "text"}
+    with raises(AttributeError):
+        Doc.ob.something
+    with raises(AttributeError):
+        Doc.ob.st.something
+
+    assert type(Doc.ns) is InstrumentedField
+    assert str(Doc.ns) == "ns"
+    assert str(Doc.ns.st) == "ns.st"
+    assert +Doc.ns.st == "ns.st"
+    assert -Doc.ns.st == "-ns.st"
+    assert Doc.ns.st.to_dict() == {"type": "text"}
+    with raises(AttributeError):
+        Doc.ns.something
+    with raises(AttributeError):
+        Doc.ns.st.something
+
+
+def test_pydantic_integration() -> None:
+    class Location(pydantic.BaseModel):
+        city: str
+        country: str
+
+    class Address(pydantic.BaseModel):
+        street: str
+        location: Location
+
+    class Phone(pydantic.BaseModel):
+        type: str
+        number: str
+
+    class User(BaseESModel):
+        name: Annotated[str, Keyword()] = pydantic.Field(default="Unknown")
+        bio: str
+        age: int = pydantic.Field(strict=True, ge=0)
+        address: Address = pydantic.Field(
+            default=Address(
+                street="Unknown", location=Location(city="Unknown", country="Unknown")
+            )
+        )
+        phones: list[Phone] = pydantic.Field(default=[])
+
+    u = User(
+        name="foo",
+        bio="lorem ipsum",
+        age=30,
+        address=Address(
+            street="123 Main St.",
+            location=Location(city="San Francisco", country="USA"),
+        ),
+    )
+    u.phones.append(Phone(type="Home", number="+12345678900"))
+    assert u.model_dump() == {
+        "name": "foo",
+        "bio": "lorem ipsum",
+        "age": 30,
+        "address": {
+            "street": "123 Main St.",
+            "location": {
+                "city": "San Francisco",
+                "country": "USA",
+            },
+        },
+        "phones": [
+            {
+                "type": "Home",
+                "number": "+12345678900",
+            }
+        ],
+        "meta": {
+            "id": "",
+            "index": "",
+            "primary_term": 0,
+            "score": 0,
+            "seq_no": 0,
+            "version": 0,
+        },
+    }
+    udoc = u.to_doc()
+    assert udoc.name == "foo"
+    assert udoc.bio == "lorem ipsum"
+    assert udoc.age == 30
+    assert udoc.to_dict() == {
+        "name": "foo",
+        "bio": "lorem ipsum",
+        "age": 30,
+        "address": {
+            "street": "123 Main St.",
+            "location": {
+                "city": "San Francisco",
+                "country": "USA",
+            },
+        },
+        "phones": [
+            {
+                "type": "Home",
+                "number": "+12345678900",
+            }
+        ],
+    }
+
+    u2 = User.from_doc(udoc)
+    assert u == u2
+
+    with raises(pydantic.ValidationError):
+        _ = User()
+
+    with raises(pydantic.ValidationError):
+        _ = User(name="foo", bio="lorem ipsum")
+
+    with raises(pydantic.ValidationError):
+        _ = User(name="foo", bio="lorem ipsum", age=-10)
+
+    u3 = User(bio="lorem ipsum", age=30)
+    assert u3.name == "Unknown"
+    assert u3.address.location.country == "Unknown"
+    assert u3.phones == []
+    assert u3.to_doc().name == "Unknown"
+    assert u3.to_doc().address.location.country == "Unknown"
+    assert u3.to_doc().phones == []
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_faceted_search.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_faceted_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,201 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+
+import pytest
+
+from elasticsearch.dsl.faceted_search import (
+    DateHistogramFacet,
+    FacetedSearch,
+    TermsFacet,
+)
+
+
+class BlogSearch(FacetedSearch):
+    doc_types = ["user", "post"]
+    fields = [
+        "title^5",
+        "body",
+    ]
+
+    facets = {
+        "category": TermsFacet(field="category.raw"),
+        "tags": TermsFacet(field="tags"),
+    }
+
+
+def test_query_is_created_properly() -> None:
+    bs = BlogSearch("python search")
+    s = bs.build_search()
+
+    assert s._doc_type == ["user", "post"]
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"match_all": {}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"match_all": {}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "highlight": {"fields": {"body": {}, "title": {}}},
+    } == s.to_dict()
+
+
+def test_query_is_created_properly_with_sort_tuple() -> None:
+    bs = BlogSearch("python search", sort=("category", "-title"))
+    s = bs.build_search()
+
+    assert s._doc_type == ["user", "post"]
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"match_all": {}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"match_all": {}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "highlight": {"fields": {"body": {}, "title": {}}},
+        "sort": ["category", {"title": {"order": "desc"}}],
+    } == s.to_dict()
+
+
+def test_filter_is_applied_to_search_but_not_relevant_facet() -> None:
+    bs = BlogSearch("python search", filters={"category": "elastic"})
+    s = bs.build_search()
+
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"terms": {"category.raw": ["elastic"]}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"match_all": {}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "post_filter": {"terms": {"category.raw": ["elastic"]}},
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "highlight": {"fields": {"body": {}, "title": {}}},
+    } == s.to_dict()
+
+
+def test_filters_are_applied_to_search_and_relevant_facets() -> None:
+    bs = BlogSearch(
+        "python search", filters={"category": "elastic", "tags": ["python", "django"]}
+    )
+    s = bs.build_search()
+
+    d = s.to_dict()
+
+    # we need to test post_filter without relying on order
+    f = d["post_filter"]["bool"].pop("must")
+    assert len(f) == 2
+    assert {"terms": {"category.raw": ["elastic"]}} in f
+    assert {"terms": {"tags": ["python", "django"]}} in f
+
+    assert {
+        "aggs": {
+            "_filter_tags": {
+                "filter": {"terms": {"category.raw": ["elastic"]}},
+                "aggs": {"tags": {"terms": {"field": "tags"}}},
+            },
+            "_filter_category": {
+                "filter": {"terms": {"tags": ["python", "django"]}},
+                "aggs": {"category": {"terms": {"field": "category.raw"}}},
+            },
+        },
+        "query": {
+            "multi_match": {"fields": ["title^5", "body"], "query": "python search"}
+        },
+        "post_filter": {"bool": {}},
+        "highlight": {"fields": {"body": {}, "title": {}}},
+    } == d
+
+
+def test_date_histogram_facet_with_1970_01_01_date() -> None:
+    dhf = DateHistogramFacet()
+    assert dhf.get_value({"key": None}) == datetime(1970, 1, 1, 0, 0)  # type: ignore[arg-type]
+    assert dhf.get_value({"key": 0}) == datetime(1970, 1, 1, 0, 0)  # type: ignore[arg-type]
+
+
+@pytest.mark.parametrize(
+    ["interval_type", "interval"],
+    [
+        ("interval", "year"),
+        ("calendar_interval", "year"),
+        ("interval", "month"),
+        ("calendar_interval", "month"),
+        ("interval", "week"),
+        ("calendar_interval", "week"),
+        ("interval", "day"),
+        ("calendar_interval", "day"),
+        ("fixed_interval", "day"),
+        ("interval", "hour"),
+        ("fixed_interval", "hour"),
+        ("interval", "1Y"),
+        ("calendar_interval", "1Y"),
+        ("interval", "1M"),
+        ("calendar_interval", "1M"),
+        ("interval", "1w"),
+        ("calendar_interval", "1w"),
+        ("interval", "1d"),
+        ("calendar_interval", "1d"),
+        ("fixed_interval", "1d"),
+        ("interval", "1h"),
+        ("fixed_interval", "1h"),
+    ],
+)
+def test_date_histogram_interval_types(interval_type: str, interval: str) -> None:
+    dhf = DateHistogramFacet(field="@timestamp", **{interval_type: interval})
+    assert dhf.get_aggregation().to_dict() == {
+        "date_histogram": {
+            "field": "@timestamp",
+            interval_type: interval,
+            "min_doc_count": 0,
+        }
+    }
+    dhf.get_value_filter(datetime.now())
+
+
+def test_date_histogram_no_interval_keyerror() -> None:
+    dhf = DateHistogramFacet(field="@timestamp")
+    with pytest.raises(KeyError) as e:
+        dhf.get_value_filter(datetime.now())
+    assert str(e.value) == "'interval'"
+
+
+def test_params_added_to_search() -> None:
+    bs = BlogSearch("python search")
+    assert bs._s._params == {}
+    bs.params(routing="42")
+    assert bs._s._params == {"routing": "42"}
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_index.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_index.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,197 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import string
+from random import choice
+from typing import Any, Dict
+
+import pytest
+from pytest import raises
+
+from elasticsearch.dsl import (
+    Date,
+    Document,
+    Index,
+    IndexTemplate,
+    Text,
+    analyzer,
+)
+
+
+class Post(Document):
+    title = Text()
+    published_from = Date()
+
+
+def test_multiple_doc_types_will_combine_mappings() -> None:
+    class User(Document):
+        username = Text()
+
+    i = Index("i")
+    i.document(Post)
+    i.document(User)
+    assert {
+        "mappings": {
+            "properties": {
+                "title": {"type": "text"},
+                "username": {"type": "text"},
+                "published_from": {"type": "date"},
+            }
+        }
+    } == i.to_dict()
+
+
+def test_search_is_limited_to_index_name() -> None:
+    i = Index("my-index")
+    s = i.search()
+
+    assert s._index == ["my-index"]
+
+
+def test_cloned_index_has_copied_settings_and_using() -> None:
+    client = object()
+    i = Index("my-index", using=client)  # type: ignore[arg-type]
+    i.settings(number_of_shards=1)
+
+    i2 = i.clone("my-other-index")
+
+    assert "my-other-index" == i2._name
+    assert client is i2._using
+    assert i._settings == i2._settings
+    assert i._settings is not i2._settings
+
+
+def test_cloned_index_has_analysis_attribute() -> None:
+    """
+    Regression test for Issue #582 in which `Index.clone()` was not copying
+    over the `_analysis` attribute.
+    """
+    client = object()
+    i = Index("my-index", using=client)  # type: ignore[arg-type]
+
+    random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100))
+    random_analyzer = analyzer(
+        random_analyzer_name, tokenizer="standard", filter="standard"
+    )
+
+    i.analyzer(random_analyzer)
+
+    i2 = i.clone("my-clone-index")
+
+    assert i.to_dict()["settings"]["analysis"] == i2.to_dict()["settings"]["analysis"]
+
+
+def test_settings_are_saved() -> None:
+    i = Index("i")
+    i.settings(number_of_replicas=0)
+    i.settings(number_of_shards=1)
+
+    assert {"settings": {"number_of_shards": 1, "number_of_replicas": 0}} == i.to_dict()
+
+
+def test_registered_doc_type_included_in_to_dict() -> None:
+    i = Index("i", using="alias")
+    i.document(Post)
+
+    assert {
+        "mappings": {
+            "properties": {
+                "title": {"type": "text"},
+                "published_from": {"type": "date"},
+            }
+        }
+    } == i.to_dict()
+
+
+def test_registered_doc_type_included_in_search() -> None:
+    i = Index("i", using="alias")
+    i.document(Post)
+
+    s = i.search()
+
+    assert s._doc_type == [Post]
+
+
+def test_aliases_add_to_object() -> None:
+    random_alias = "".join(choice(string.ascii_letters) for _ in range(100))
+    alias_dict: Dict[str, Any] = {random_alias: {}}
+
+    index = Index("i", using="alias")
+    index.aliases(**alias_dict)
+
+    assert index._aliases == alias_dict
+
+
+def test_aliases_returned_from_to_dict() -> None:
+    random_alias = "".join(choice(string.ascii_letters) for _ in range(100))
+    alias_dict: Dict[str, Any] = {random_alias: {}}
+
+    index = Index("i", using="alias")
+    index.aliases(**alias_dict)
+
+    assert index._aliases == index.to_dict()["aliases"] == alias_dict
+
+
+def test_analyzers_added_to_object() -> None:
+    random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100))
+    random_analyzer = analyzer(
+        random_analyzer_name, tokenizer="standard", filter="standard"
+    )
+
+    index = Index("i", using="alias")
+    index.analyzer(random_analyzer)
+
+    assert index._analysis["analyzer"][random_analyzer_name] == {
+        "filter": ["standard"],
+        "type": "custom",
+        "tokenizer": "standard",
+    }
+
+
+def test_analyzers_returned_from_to_dict() -> None:
+    random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100))
+    random_analyzer = analyzer(
+        random_analyzer_name, tokenizer="standard", filter="standard"
+    )
+    index = Index("i", using="alias")
+    index.analyzer(random_analyzer)
+
+    assert index.to_dict()["settings"]["analysis"]["analyzer"][
+        random_analyzer_name
+    ] == {"filter": ["standard"], "type": "custom", "tokenizer": "standard"}
+
+
+def test_conflicting_analyzer_raises_error() -> None:
+    i = Index("i")
+    i.analyzer("my_analyzer", tokenizer="whitespace", filter=["lowercase", "stop"])
+
+    with raises(ValueError):
+        i.analyzer("my_analyzer", tokenizer="keyword", filter=["lowercase", "stop"])
+
+
+def test_index_template_can_have_order() -> None:
+    i = Index("i-*")
+    it = i.as_template("i", order=2)
+
+    assert {"index_patterns": ["i-*"], "order": 2} == it.to_dict()
+
+
+@pytest.mark.sync
+def test_index_template_save_result(mock_client: Any) -> None:
+    it = IndexTemplate("test-template", "test-*")
+
+    assert it.save(using="mock") == mock_client.indices.put_template()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_mapping.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_mapping.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,222 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import json
+
+from elasticsearch.dsl import Keyword, Mapping, Nested, Text, analysis
+
+
+def test_mapping_can_has_fields() -> None:
+    m = Mapping()
+    m.field("name", "text").field("tags", "keyword")
+
+    assert {
+        "properties": {"name": {"type": "text"}, "tags": {"type": "keyword"}}
+    } == m.to_dict()
+
+
+def test_mapping_update_is_recursive() -> None:
+    m1 = Mapping()
+    m1.field("title", "text")
+    m1.field("author", "object")
+    m1.field("author", "object", properties={"name": {"type": "text"}})
+    m1.meta("_all", enabled=False)
+    m1.meta("dynamic", False)
+
+    m2 = Mapping()
+    m2.field("published_from", "date")
+    m2.field("author", "object", properties={"email": {"type": "text"}})
+    m2.field("title", "text")
+    m2.field("lang", "keyword")
+    m2.meta("_analyzer", path="lang")
+
+    m1.update(m2, update_only=True)
+
+    assert {
+        "_all": {"enabled": False},
+        "_analyzer": {"path": "lang"},
+        "dynamic": False,
+        "properties": {
+            "published_from": {"type": "date"},
+            "title": {"type": "text"},
+            "lang": {"type": "keyword"},
+            "author": {
+                "type": "object",
+                "properties": {"name": {"type": "text"}, "email": {"type": "text"}},
+            },
+        },
+    } == m1.to_dict()
+
+
+def test_properties_can_iterate_over_all_the_fields() -> None:
+    m = Mapping()
+    m.field("f1", "text", test_attr="f1", fields={"f2": Keyword(test_attr="f2")})
+    m.field("f3", Nested(test_attr="f3", properties={"f4": Text(test_attr="f4")}))
+
+    assert {"f1", "f2", "f3", "f4"} == {
+        f.test_attr for f in m.properties._collect_fields()
+    }
+
+
+def test_mapping_can_collect_all_analyzers_and_normalizers() -> None:
+    a1 = analysis.analyzer(
+        "my_analyzer1",
+        tokenizer="keyword",
+        filter=[
+            "lowercase",
+            analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]),
+        ],
+    )
+    a2 = analysis.analyzer("english")
+    a3 = analysis.analyzer("unknown_custom")
+    a4 = analysis.analyzer(
+        "my_analyzer2",
+        tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3),
+        filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])],
+    )
+    a5 = analysis.analyzer("my_analyzer3", tokenizer="keyword")
+    n1 = analysis.normalizer("my_normalizer1", filter=["lowercase"])
+    n2 = analysis.normalizer(
+        "my_normalizer2",
+        filter=[
+            "my_filter1",
+            "my_filter2",
+            analysis.token_filter("my_filter3", "stop", stopwords=["e", "f"]),
+        ],
+    )
+    n3 = analysis.normalizer("unknown_custom")
+
+    m = Mapping()
+    m.field(
+        "title",
+        "text",
+        analyzer=a1,
+        fields={"english": Text(analyzer=a2), "unknown": Keyword(search_analyzer=a3)},
+    )
+    m.field("comments", Nested(properties={"author": Text(analyzer=a4)}))
+    m.field("normalized_title", "keyword", normalizer=n1)
+    m.field("normalized_comment", "keyword", normalizer=n2)
+    m.field("unknown", "keyword", normalizer=n3)
+    m.meta("_all", analyzer=a5)
+
+    assert {
+        "analyzer": {
+            "my_analyzer1": {
+                "filter": ["lowercase", "my_filter1"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            },
+            "my_analyzer2": {
+                "filter": ["my_filter2"],
+                "tokenizer": "trigram",
+                "type": "custom",
+            },
+            "my_analyzer3": {"tokenizer": "keyword", "type": "custom"},
+        },
+        "normalizer": {
+            "my_normalizer1": {"filter": ["lowercase"], "type": "custom"},
+            "my_normalizer2": {
+                "filter": ["my_filter1", "my_filter2", "my_filter3"],
+                "type": "custom",
+            },
+        },
+        "filter": {
+            "my_filter1": {"stopwords": ["a", "b"], "type": "stop"},
+            "my_filter2": {"stopwords": ["c", "d"], "type": "stop"},
+            "my_filter3": {"stopwords": ["e", "f"], "type": "stop"},
+        },
+        "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}},
+    } == m._collect_analysis()
+
+    assert json.loads(json.dumps(m.to_dict())) == m.to_dict()
+
+
+def test_mapping_can_collect_multiple_analyzers() -> None:
+    a1 = analysis.analyzer(
+        "my_analyzer1",
+        tokenizer="keyword",
+        filter=[
+            "lowercase",
+            analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]),
+        ],
+    )
+    a2 = analysis.analyzer(
+        "my_analyzer2",
+        tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3),
+        filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])],
+    )
+    m = Mapping()
+    m.field("title", "text", analyzer=a1, search_analyzer=a2)
+    m.field(
+        "text",
+        "text",
+        analyzer=a1,
+        fields={
+            "english": Text(analyzer=a1),
+            "unknown": Keyword(analyzer=a1, search_analyzer=a2),
+        },
+    )
+    assert {
+        "analyzer": {
+            "my_analyzer1": {
+                "filter": ["lowercase", "my_filter1"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            },
+            "my_analyzer2": {
+                "filter": ["my_filter2"],
+                "tokenizer": "trigram",
+                "type": "custom",
+            },
+        },
+        "filter": {
+            "my_filter1": {"stopwords": ["a", "b"], "type": "stop"},
+            "my_filter2": {"stopwords": ["c", "d"], "type": "stop"},
+        },
+        "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}},
+    } == m._collect_analysis()
+
+
+def test_even_non_custom_analyzers_can_have_params() -> None:
+    a1 = analysis.analyzer("whitespace", type="pattern", pattern=r"\\s+")
+    m = Mapping()
+    m.field("title", "text", analyzer=a1)
+
+    assert {
+        "analyzer": {"whitespace": {"type": "pattern", "pattern": r"\\s+"}}
+    } == m._collect_analysis()
+
+
+def test_resolve_field_can_resolve_multifields() -> None:
+    m = Mapping()
+    m.field("title", "text", fields={"keyword": Keyword()})
+
+    assert isinstance(m.resolve_field("title.keyword"), Keyword)
+
+
+def test_resolve_nested() -> None:
+    m = Mapping()
+    m.field("n1", "nested", properties={"n2": Nested(properties={"k1": Keyword()})})
+    m.field("k2", "keyword")
+
+    nested, field = m.resolve_nested("n1.n2.k1")
+    assert nested == ["n1", "n1.n2"]
+    assert isinstance(field, Keyword)
+
+    nested, field = m.resolve_nested("k2")
+    assert nested == []
+    assert isinstance(field, Keyword)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_search.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,839 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from copy import deepcopy
+from typing import Any
+
+import pytest
+from pytest import raises
+
+from elasticsearch.dsl import (
+    Document,
+    EmptySearch,
+    Q,
+    Search,
+    query,
+    types,
+    wrappers,
+)
+from elasticsearch.dsl.exceptions import IllegalOperation
+
+
+def test_expand__to_dot_is_respected() -> None:
+    s = Search().query("match", a__b=42, _expand__to_dot=False)
+
+    assert {"query": {"match": {"a__b": 42}}} == s.to_dict()
+
+
+@pytest.mark.sync
+def test_execute_uses_cache() -> None:
+    s = Search()
+    r = object()
+    s._response = r  # type: ignore[assignment]
+
+    assert r is s.execute()
+
+
+@pytest.mark.sync
+def test_cache_can_be_ignored(mock_client: Any) -> None:
+    s = Search(using="mock")
+    r = object()
+    s._response = r  # type: ignore[assignment]
+    s.execute(ignore_cache=True)
+
+    mock_client.search.assert_called_once_with(index=None, body={})
+
+
+@pytest.mark.sync
+def test_iter_iterates_over_hits() -> None:
+    s = Search()
+    s._response = [1, 2, 3]  # type: ignore[assignment]
+
+    assert [1, 2, 3] == [hit for hit in s]
+
+
+def test_cache_isnt_cloned() -> None:
+    s = Search()
+    s._response = object()  # type: ignore[assignment]
+
+    assert not hasattr(s._clone(), "_response")
+
+
+def test_search_starts_with_no_query() -> None:
+    s = Search()
+
+    assert s.query._proxied is None
+
+
+def test_search_query_combines_query() -> None:
+    s = Search()
+
+    s2 = s.query("match", f=42)
+    assert s2.query._proxied == query.Match(f=42)
+    assert s.query._proxied is None
+
+    s3 = s2.query("match", f=43)
+    assert s2.query._proxied == query.Match(f=42)
+    assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)])
+
+
+def test_query_can_be_assigned_to() -> None:
+    s = Search()
+
+    q = Q("match", title="python")
+    s.query = q  # type: ignore
+
+    assert s.query._proxied is q
+
+
+def test_query_can_be_wrapped() -> None:
+    s = Search().query("match", title="python")
+
+    s.query = Q("function_score", query=s.query, field_value_factor={"field": "rating"})  # type: ignore
+
+    assert {
+        "query": {
+            "function_score": {
+                "functions": [{"field_value_factor": {"field": "rating"}}],
+                "query": {"match": {"title": "python"}},
+            }
+        }
+    } == s.to_dict()
+
+
+def test_using() -> None:
+    o = object()
+    o2 = object()
+    s = Search(using=o)
+    assert s._using is o
+    s2 = s.using(o2)  # type: ignore[arg-type]
+    assert s._using is o
+    assert s2._using is o2
+
+
+def test_methods_are_proxied_to_the_query() -> None:
+    s = Search().query("match_all")
+
+    assert s.query.to_dict() == {"match_all": {}}
+
+
+def test_query_always_returns_search() -> None:
+    s = Search()
+
+    assert isinstance(s.query("match", f=42), Search)
+
+
+def test_source_copied_on_clone() -> None:
+    s = Search().source(False)
+    assert s._clone()._source == s._source
+    assert s._clone()._source is False
+
+    s2 = Search().source([])
+    assert s2._clone()._source == s2._source
+    assert s2._source == []
+
+    s3 = Search().source(["some", "fields"])
+    assert s3._clone()._source == s3._source
+    assert s3._clone()._source == ["some", "fields"]
+
+
+def test_copy_clones() -> None:
+    from copy import copy
+
+    s1 = Search().source(["some", "fields"])
+    s2 = copy(s1)
+
+    assert s1 == s2
+    assert s1 is not s2
+
+
+def test_aggs_allow_two_metric() -> None:
+    s = Search()
+
+    s.aggs.metric("a", "max", field="a").metric("b", "max", field="b")
+
+    assert s.to_dict() == {
+        "aggs": {"a": {"max": {"field": "a"}}, "b": {"max": {"field": "b"}}}
+    }
+
+
+def test_aggs_get_copied_on_change() -> None:
+    s = Search().query("match_all")
+    s.aggs.bucket("per_tag", "terms", field="f").metric(
+        "max_score", "max", field="score"
+    )
+
+    s2 = s.query("match_all")
+    s2.aggs.bucket("per_month", "date_histogram", field="date", interval="month")
+    s3 = s2.query("match_all")
+    s3.aggs["per_month"].metric("max_score", "max", field="score")
+    s4 = s3._clone()
+    s4.aggs.metric("max_score", "max", field="score")
+
+    d: Any = {
+        "query": {"match_all": {}},
+        "aggs": {
+            "per_tag": {
+                "terms": {"field": "f"},
+                "aggs": {"max_score": {"max": {"field": "score"}}},
+            }
+        },
+    }
+
+    assert d == s.to_dict()
+    d["aggs"]["per_month"] = {"date_histogram": {"field": "date", "interval": "month"}}
+    assert d == s2.to_dict()
+    d["aggs"]["per_month"]["aggs"] = {"max_score": {"max": {"field": "score"}}}
+    assert d == s3.to_dict()
+    d["aggs"]["max_score"] = {"max": {"field": "score"}}
+    assert d == s4.to_dict()
+
+
+def test_search_index() -> None:
+    s = Search(index="i")
+    assert s._index == ["i"]
+    s = s.index("i2")
+    assert s._index == ["i", "i2"]
+    s = s.index("i3")
+    assert s._index == ["i", "i2", "i3"]
+    s = s.index()
+    assert s._index is None
+    s = Search(index=("i", "i2"))
+    assert s._index == ["i", "i2"]
+    s = Search(index=["i", "i2"])
+    assert s._index == ["i", "i2"]
+    s = Search()
+    s = s.index("i", "i2")
+    assert s._index == ["i", "i2"]
+    s2 = s.index("i3")
+    assert s._index == ["i", "i2"]
+    assert s2._index == ["i", "i2", "i3"]
+    s = Search()
+    s = s.index(["i", "i2"], "i3")
+    assert s._index == ["i", "i2", "i3"]
+    s2 = s.index("i4")
+    assert s._index == ["i", "i2", "i3"]
+    assert s2._index == ["i", "i2", "i3", "i4"]
+    s2 = s.index(["i4"])
+    assert s2._index == ["i", "i2", "i3", "i4"]
+    s2 = s.index(("i4", "i5"))
+    assert s2._index == ["i", "i2", "i3", "i4", "i5"]
+
+
+def test_doc_type_document_class() -> None:
+    class MyDocument(Document):
+        pass
+
+    s = Search(doc_type=MyDocument)
+    assert s._doc_type == [MyDocument]
+    assert s._doc_type_map == {}
+
+    s = Search().doc_type(MyDocument)
+    assert s._doc_type == [MyDocument]
+    assert s._doc_type_map == {}
+
+
+def test_knn() -> None:
+    s = Search()
+
+    with raises(TypeError):
+        s.knn()  # type: ignore[call-arg]
+    with raises(TypeError):
+        s.knn("field")  # type: ignore[call-arg]
+    with raises(TypeError):
+        s.knn("field", 5)  # type: ignore[call-arg]
+    with raises(ValueError):
+        s.knn("field", 5, 100)
+    with raises(ValueError):
+        s.knn("field", 5, 100, query_vector=[1, 2, 3], query_vector_builder={})
+
+    s = s.knn("field", 5, 100, query_vector=[1, 2, 3])
+    assert {
+        "knn": {
+            "field": "field",
+            "k": 5,
+            "num_candidates": 100,
+            "query_vector": [1, 2, 3],
+        }
+    } == s.to_dict()
+
+    s = s.knn(
+        k=4,
+        num_candidates=40,
+        boost=0.8,
+        field="name",
+        query_vector_builder={
+            "text_embedding": {"model_id": "foo", "model_text": "search text"}
+        },
+        inner_hits={"size": 1},
+    )
+    assert {
+        "knn": [
+            {
+                "field": "field",
+                "k": 5,
+                "num_candidates": 100,
+                "query_vector": [1, 2, 3],
+            },
+            {
+                "field": "name",
+                "k": 4,
+                "num_candidates": 40,
+                "query_vector_builder": {
+                    "text_embedding": {"model_id": "foo", "model_text": "search text"}
+                },
+                "boost": 0.8,
+                "inner_hits": {"size": 1},
+            },
+        ]
+    } == s.to_dict()
+
+
+def test_rank() -> None:
+    s = Search()
+    s.rank(rrf=False)
+    assert {} == s.to_dict()
+
+    s = s.rank(rrf=True)
+    assert {"rank": {"rrf": {}}} == s.to_dict()
+
+    s = s.rank(rrf={"window_size": 50, "rank_constant": 20})
+    assert {"rank": {"rrf": {"window_size": 50, "rank_constant": 20}}} == s.to_dict()
+
+
+def test_sort() -> None:
+    s = Search()
+    s = s.sort("fielda", "-fieldb")
+
+    assert ["fielda", {"fieldb": {"order": "desc"}}] == s._sort
+    assert {"sort": ["fielda", {"fieldb": {"order": "desc"}}]} == s.to_dict()
+
+    s = s.sort()
+    assert [] == s._sort
+    assert Search().to_dict() == s.to_dict()
+
+
+def test_sort_by_score() -> None:
+    s = Search()
+    s = s.sort("_score")
+    assert {"sort": ["_score"]} == s.to_dict()
+
+    s = Search()
+    with raises(IllegalOperation):
+        s.sort("-_score")
+
+
+def test_collapse() -> None:
+    s = Search()
+
+    inner_hits = {"name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}]}
+    s = s.collapse("user.id", inner_hits=inner_hits, max_concurrent_group_searches=4)
+
+    assert {
+        "field": "user.id",
+        "inner_hits": {
+            "name": "most_recent",
+            "size": 5,
+            "sort": [{"@timestamp": "desc"}],
+        },
+        "max_concurrent_group_searches": 4,
+    } == s._collapse
+    assert {
+        "collapse": {
+            "field": "user.id",
+            "inner_hits": {
+                "name": "most_recent",
+                "size": 5,
+                "sort": [{"@timestamp": "desc"}],
+            },
+            "max_concurrent_group_searches": 4,
+        }
+    } == s.to_dict()
+
+    s = s.collapse()
+    assert {} == s._collapse
+    assert Search().to_dict() == s.to_dict()
+
+
+def test_slice() -> None:
+    s = Search()
+    assert {"from": 3, "size": 7} == s[3:10].to_dict()
+    assert {"size": 5} == s[:5].to_dict()
+    assert {"from": 3} == s[3:].to_dict()
+    assert {"from": 0, "size": 0} == s[0:0].to_dict()
+    assert {"from": 20, "size": 0} == s[20:0].to_dict()
+    assert {"from": 10, "size": 5} == s[10:][:5].to_dict()
+    assert {"from": 10, "size": 0} == s[:5][10:].to_dict()
+    assert {"size": 10} == s[:10][:40].to_dict()
+    assert {"size": 10} == s[:40][:10].to_dict()
+    assert {"size": 40} == s[:40][:80].to_dict()
+    assert {"from": 12, "size": 0} == s[:5][10:][2:].to_dict()
+    assert {"from": 15, "size": 0} == s[10:][:5][5:].to_dict()
+    assert {} == s[:].to_dict()
+    with raises(ValueError):
+        s[-1:]
+    with raises(ValueError):
+        s[4:-1]
+    with raises(ValueError):
+        s[-3:-2]
+
+
+def test_index() -> None:
+    s = Search()
+    assert {"from": 3, "size": 1} == s[3].to_dict()
+    assert {"from": 3, "size": 1} == s[3][0].to_dict()
+    assert {"from": 8, "size": 0} == s[3][5].to_dict()
+    assert {"from": 4, "size": 1} == s[3:10][1].to_dict()
+    with raises(ValueError):
+        s[-3]
+
+
+def test_search_to_dict() -> None:
+    s = Search()
+    assert {} == s.to_dict()
+
+    s = s.query("match", f=42)
+    assert {"query": {"match": {"f": 42}}} == s.to_dict()
+
+    assert {"query": {"match": {"f": 42}}, "size": 10} == s.to_dict(size=10)
+
+    s.aggs.bucket("per_tag", "terms", field="f").metric(
+        "max_score", "max", field="score"
+    )
+    d = {
+        "aggs": {
+            "per_tag": {
+                "terms": {"field": "f"},
+                "aggs": {"max_score": {"max": {"field": "score"}}},
+            }
+        },
+        "query": {"match": {"f": 42}},
+    }
+    assert d == s.to_dict()
+
+    s = Search(extra={"size": 5})
+    assert {"size": 5} == s.to_dict()
+    s = s.extra(from_=42)
+    assert {"size": 5, "from": 42} == s.to_dict()
+
+
+def test_complex_example() -> None:
+    s = Search()
+    s = (
+        s.query("match", title="python")
+        .query(~Q("match", title="ruby"))
+        .filter(Q("term", category="meetup") | Q("term", category="conference"))
+        .collapse("user_id")
+        .post_filter("terms", tags=["prague", "czech"])
+        .script_fields(more_attendees="doc['attendees'].value + 42")
+    )
+
+    s.aggs.bucket("per_country", "terms", field="country").metric(
+        "avg_attendees", "avg", field="attendees"
+    )
+
+    s.query.minimum_should_match = 2
+
+    s = s.highlight_options(order="score").highlight("title", "body", fragment_size=50)
+
+    assert {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [{"match": {"title": "python"}}],
+                "must_not": [{"match": {"title": "ruby"}}],
+                "minimum_should_match": 2,
+            }
+        },
+        "post_filter": {"terms": {"tags": ["prague", "czech"]}},
+        "aggs": {
+            "per_country": {
+                "terms": {"field": "country"},
+                "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}},
+            }
+        },
+        "collapse": {"field": "user_id"},
+        "highlight": {
+            "order": "score",
+            "fields": {"title": {"fragment_size": 50}, "body": {"fragment_size": 50}},
+        },
+        "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}},
+    } == s.to_dict()
+
+
+def test_reverse() -> None:
+    d = {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [
+                    {
+                        "bool": {
+                            "must": [{"match": {"title": "python"}}],
+                            "must_not": [{"match": {"title": "ruby"}}],
+                            "minimum_should_match": 2,
+                        }
+                    }
+                ],
+            }
+        },
+        "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}},
+        "aggs": {
+            "per_country": {
+                "terms": {"field": "country"},
+                "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}},
+            }
+        },
+        "sort": ["title", {"category": {"order": "desc"}}, "_score"],
+        "size": 5,
+        "highlight": {"order": "score", "fields": {"title": {"fragment_size": 50}}},
+        "suggest": {
+            "my-title-suggestions-1": {
+                "text": "devloping distibutd saerch engies",
+                "term": {"size": 3, "field": "title"},
+            }
+        },
+        "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}},
+    }
+
+    d2 = deepcopy(d)
+
+    s = Search.from_dict(d)
+
+    # make sure we haven't modified anything in place
+    assert d == d2
+    assert {"size": 5} == s._extra
+    assert d == s.to_dict()
+
+
+def test_code_generated_classes() -> None:
+    s = Search()
+    s = (
+        s.query(query.Match("title", types.MatchQuery(query="python")))
+        .query(~query.Match("title", types.MatchQuery(query="ruby")))
+        .query(
+            query.Knn(
+                field="title",
+                query_vector=[1.0, 2.0, 3.0],
+                num_candidates=10,
+                k=3,
+                filter=query.Range("year", wrappers.Range(gt="2004")),
+            )
+        )
+        .filter(
+            query.Term("category", types.TermQuery(value="meetup"))
+            | query.Term("category", types.TermQuery(value="conference"))
+        )
+        .collapse("user_id")
+        .post_filter(query.Terms(tags=["prague", "czech"]))
+        .script_fields(more_attendees="doc['attendees'].value + 42")
+    )
+    assert {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": {"value": "meetup"}}},
+                                {"term": {"category": {"value": "conference"}}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [
+                    {"match": {"title": {"query": "python"}}},
+                    {
+                        "knn": {
+                            "field": "title",
+                            "filter": [
+                                {
+                                    "range": {
+                                        "year": {
+                                            "gt": "2004",
+                                        },
+                                    },
+                                },
+                            ],
+                            "k": 3,
+                            "num_candidates": 10,
+                            "query_vector": [
+                                1.0,
+                                2.0,
+                                3.0,
+                            ],
+                        },
+                    },
+                ],
+                "must_not": [{"match": {"title": {"query": "ruby"}}}],
+            }
+        },
+        "post_filter": {"terms": {"tags": ["prague", "czech"]}},
+        "collapse": {"field": "user_id"},
+        "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}},
+    } == s.to_dict()
+
+
+def test_from_dict_doesnt_need_query() -> None:
+    s = Search.from_dict({"size": 5})
+
+    assert {"size": 5} == s.to_dict()
+
+
+@pytest.mark.sync
+def test_params_being_passed_to_search(mock_client: Any) -> None:
+    s = Search(using="mock")
+    s = s.params(routing="42")
+    s.execute()
+
+    mock_client.search.assert_called_once_with(index=None, body={}, routing="42")
+
+
+def test_source() -> None:
+    assert {} == Search().source().to_dict()
+
+    assert {
+        "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]}
+    } == Search().source(includes=["foo.bar.*"], excludes=("foo.one",)).to_dict()
+
+    assert {"_source": False} == Search().source(False).to_dict()
+
+    assert {"_source": ["f1", "f2"]} == Search().source(
+        includes=["foo.bar.*"], excludes=["foo.one"]
+    ).source(["f1", "f2"]).to_dict()
+
+
+def test_source_on_clone() -> None:
+    assert {
+        "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]},
+        "query": {"bool": {"filter": [{"term": {"title": "python"}}]}},
+    } == Search().source(includes=["foo.bar.*"]).source(excludes=["foo.one"]).filter(
+        "term", title="python"
+    ).to_dict()
+    assert {
+        "_source": False,
+        "query": {"bool": {"filter": [{"term": {"title": "python"}}]}},
+    } == Search().source(False).filter("term", title="python").to_dict()
+
+
+def test_source_on_clear() -> None:
+    assert (
+        {}
+        == Search()
+        .source(includes=["foo.bar.*"])
+        .source(includes=None, excludes=None)
+        .to_dict()
+    )
+
+
+def test_suggest_accepts_global_text() -> None:
+    s = Search.from_dict(
+        {
+            "suggest": {
+                "text": "the amsterdma meetpu",
+                "my-suggest-1": {"term": {"field": "title"}},
+                "my-suggest-2": {"text": "other", "term": {"field": "body"}},
+            }
+        }
+    )
+
+    assert {
+        "suggest": {
+            "my-suggest-1": {
+                "term": {"field": "title"},
+                "text": "the amsterdma meetpu",
+            },
+            "my-suggest-2": {"term": {"field": "body"}, "text": "other"},
+        }
+    } == s.to_dict()
+
+
+def test_suggest() -> None:
+    s = Search()
+    s = s.suggest("my_suggestion", "pyhton", term={"field": "title"})
+
+    assert {
+        "suggest": {"my_suggestion": {"term": {"field": "title"}, "text": "pyhton"}}
+    } == s.to_dict()
+
+
+def test_exclude() -> None:
+    s = Search()
+    s = s.exclude("match", title="python")
+
+    assert {
+        "query": {
+            "bool": {
+                "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}]
+            }
+        }
+    } == s.to_dict()
+
+
+@pytest.mark.sync
+def test_delete_by_query(mock_client: Any) -> None:
+    s = Search(using="mock", index="i").query("match", lang="java")
+    s.delete()
+
+    mock_client.delete_by_query.assert_called_once_with(
+        index=["i"], body={"query": {"match": {"lang": "java"}}}
+    )
+
+
+def test_update_from_dict() -> None:
+    s = Search()
+    s.update_from_dict({"indices_boost": [{"important-documents": 2}]})
+    s.update_from_dict({"_source": ["id", "name"]})
+    s.update_from_dict({"collapse": {"field": "user_id"}})
+
+    assert {
+        "indices_boost": [{"important-documents": 2}],
+        "_source": ["id", "name"],
+        "collapse": {"field": "user_id"},
+    } == s.to_dict()
+
+
+def test_rescore_query_to_dict() -> None:
+    s = Search(index="index-name")
+
+    positive_query = Q(
+        "function_score",
+        query=Q("term", tags="a"),
+        script_score={"script": "_score * 1"},
+    )
+
+    negative_query = Q(
+        "function_score",
+        query=Q("term", tags="b"),
+        script_score={"script": "_score * -100"},
+    )
+
+    s = s.query(positive_query)
+    s = s.extra(
+        rescore={"window_size": 100, "query": {"rescore_query": negative_query}}
+    )
+    assert s.to_dict() == {
+        "query": {
+            "function_score": {
+                "query": {"term": {"tags": "a"}},
+                "functions": [{"script_score": {"script": "_score * 1"}}],
+            }
+        },
+        "rescore": {
+            "window_size": 100,
+            "query": {
+                "rescore_query": {
+                    "function_score": {
+                        "query": {"term": {"tags": "b"}},
+                        "functions": [{"script_score": {"script": "_score * -100"}}],
+                    }
+                }
+            },
+        },
+    }
+
+    assert s.to_dict(
+        rescore={"window_size": 10, "query": {"rescore_query": positive_query}}
+    ) == {
+        "query": {
+            "function_score": {
+                "query": {"term": {"tags": "a"}},
+                "functions": [{"script_score": {"script": "_score * 1"}}],
+            }
+        },
+        "rescore": {
+            "window_size": 10,
+            "query": {
+                "rescore_query": {
+                    "function_score": {
+                        "query": {"term": {"tags": "a"}},
+                        "functions": [{"script_score": {"script": "_score * 1"}}],
+                    }
+                }
+            },
+        },
+    }
+
+
+@pytest.mark.sync
+def test_empty_search() -> None:
+    s = EmptySearch(index="index-name")
+    s = s.query("match", lang="java")
+    s.aggs.bucket("versions", "terms", field="version")
+
+    assert s.count() == 0
+    assert [hit for hit in s] == []
+    assert [hit for hit in s.scan()] == []
+    s.delete()  # should not error
+
+
+def test_suggest_completion() -> None:
+    s = Search()
+    s = s.suggest("my_suggestion", "pyhton", completion={"field": "title"})
+
+    assert {
+        "suggest": {
+            "my_suggestion": {"completion": {"field": "title"}, "prefix": "pyhton"}
+        }
+    } == s.to_dict()
+
+
+def test_suggest_regex_query() -> None:
+    s = Search()
+    s = s.suggest("my_suggestion", regex="py[thon|py]", completion={"field": "title"})
+
+    assert {
+        "suggest": {
+            "my_suggestion": {"completion": {"field": "title"}, "regex": "py[thon|py]"}
+        }
+    } == s.to_dict()
+
+
+def test_suggest_must_pass_text_or_regex() -> None:
+    s = Search()
+    with raises(ValueError):
+        s.suggest("my_suggestion")
+
+
+def test_suggest_can_only_pass_text_or_regex() -> None:
+    s = Search()
+    with raises(ValueError):
+        s.suggest("my_suggestion", text="python", regex="py[hton|py]")
+
+
+def test_suggest_regex_must_be_with_completion() -> None:
+    s = Search()
+    with raises(ValueError):
+        s.suggest("my_suggestion", regex="py[thon|py]")
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_update_by_query.py 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_update_by_query.py
--- 8.17.2-2/test_elasticsearch/test_dsl/_sync/test_update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/_sync/test_update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,180 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from copy import deepcopy
+from typing import Any
+
+import pytest
+
+from elasticsearch.dsl import Q, UpdateByQuery
+from elasticsearch.dsl.response import UpdateByQueryResponse
+from elasticsearch.dsl.search_base import SearchBase
+
+
+def test_ubq_starts_with_no_query() -> None:
+    ubq = UpdateByQuery()
+
+    assert ubq.query._proxied is None
+
+
+def test_ubq_to_dict() -> None:
+    ubq = UpdateByQuery()
+    assert {} == ubq.to_dict()
+
+    ubq = ubq.query("match", f=42)
+    assert {"query": {"match": {"f": 42}}} == ubq.to_dict()
+
+    assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10)
+
+    ubq = UpdateByQuery(extra={"size": 5})
+    assert {"size": 5} == ubq.to_dict()
+
+    ubq = UpdateByQuery(extra={"extra_q": Q("term", category="conference")})
+    assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict()
+
+
+def test_complex_example() -> None:
+    ubq = UpdateByQuery()
+    ubq = (
+        ubq.query("match", title="python")
+        .query(~Q("match", title="ruby"))
+        .filter(Q("term", category="meetup") | Q("term", category="conference"))
+        .script(
+            source="ctx._source.likes += params.f", lang="painless", params={"f": 3}
+        )
+    )
+
+    ubq.query.minimum_should_match = 2
+    assert {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [{"match": {"title": "python"}}],
+                "must_not": [{"match": {"title": "ruby"}}],
+                "minimum_should_match": 2,
+            }
+        },
+        "script": {
+            "source": "ctx._source.likes += params.f",
+            "lang": "painless",
+            "params": {"f": 3},
+        },
+    } == ubq.to_dict()
+
+
+def test_exclude() -> None:
+    ubq = UpdateByQuery()
+    ubq = ubq.exclude("match", title="python")
+
+    assert {
+        "query": {
+            "bool": {
+                "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}]
+            }
+        }
+    } == ubq.to_dict()
+
+
+def test_reverse() -> None:
+    d = {
+        "query": {
+            "bool": {
+                "filter": [
+                    {
+                        "bool": {
+                            "should": [
+                                {"term": {"category": "meetup"}},
+                                {"term": {"category": "conference"}},
+                            ]
+                        }
+                    }
+                ],
+                "must": [
+                    {
+                        "bool": {
+                            "must": [{"match": {"title": "python"}}],
+                            "must_not": [{"match": {"title": "ruby"}}],
+                            "minimum_should_match": 2,
+                        }
+                    }
+                ],
+            }
+        },
+        "script": {
+            "source": "ctx._source.likes += params.f",
+            "lang": "painless",
+            "params": {"f": 3},
+        },
+    }
+
+    d2 = deepcopy(d)
+
+    ubq = UpdateByQuery.from_dict(d)
+
+    assert d == d2
+    assert d == ubq.to_dict()
+
+
+def test_from_dict_doesnt_need_query() -> None:
+    ubq = UpdateByQuery.from_dict({"script": {"source": "test"}})
+
+    assert {"script": {"source": "test"}} == ubq.to_dict()
+
+
+@pytest.mark.sync
+def test_params_being_passed_to_search(mock_client: Any) -> None:
+    ubq = UpdateByQuery(using="mock", index="i")
+    ubq = ubq.params(routing="42")
+    ubq.execute()
+
+    mock_client.update_by_query.assert_called_once_with(index=["i"], routing="42")
+
+
+def test_overwrite_script() -> None:
+    ubq = UpdateByQuery()
+    ubq = ubq.script(
+        source="ctx._source.likes += params.f", lang="painless", params={"f": 3}
+    )
+    assert {
+        "script": {
+            "source": "ctx._source.likes += params.f",
+            "lang": "painless",
+            "params": {"f": 3},
+        }
+    } == ubq.to_dict()
+    ubq = ubq.script(source="ctx._source.likes++")
+    assert {"script": {"source": "ctx._source.likes++"}} == ubq.to_dict()
+
+
+def test_update_by_query_response_success() -> None:
+    ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": []})
+    assert ubqr.success()
+
+    ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": True, "failures": []})
+    assert not ubqr.success()
+
+    ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": [{}]})
+    assert not ubqr.success()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/async_sleep.py 9.2.0-1/test_elasticsearch/test_dsl/async_sleep.py
--- 8.17.2-2/test_elasticsearch/test_dsl/async_sleep.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/async_sleep.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,25 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Union
+
+import anyio
+
+
+async def sleep(secs: Union[int, float]) -> None:
+    """Tests can use this function to sleep."""
+    await anyio.sleep(secs)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/conftest.py 9.2.0-1/test_elasticsearch/test_dsl/conftest.py
--- 8.17.2-2/test_elasticsearch/test_dsl/conftest.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/conftest.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,470 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+import os
+import re
+import time
+from datetime import datetime
+from typing import Any, AsyncGenerator, Dict, Generator, Tuple, cast
+from unittest import SkipTest
+from unittest.mock import AsyncMock, Mock
+
+import anyio
+import pytest
+import sniffio
+from elastic_transport import ObjectApiResponse
+
+from elasticsearch import AsyncElasticsearch, Elasticsearch
+from elasticsearch.dsl import Search
+from elasticsearch.dsl.async_connections import add_connection as add_async_connection
+from elasticsearch.dsl.async_connections import connections as async_connections
+from elasticsearch.dsl.connections import add_connection, connections
+from elasticsearch.exceptions import ConnectionError
+from elasticsearch.helpers import bulk
+
+from ..utils import CA_CERTS, wipe_cluster
+from .test_integration._async import test_document as async_document
+from .test_integration._sync import test_document as sync_document
+from .test_integration.test_data import (
+    DATA,
+    FLAT_DATA,
+    TEST_GIT_DATA,
+    create_flat_git_index,
+    create_git_index,
+)
+
+
+def get_test_client(
+    elasticsearch_url, wait: bool = True, **kwargs: Any
+) -> Elasticsearch:
+    # construct kwargs from the environment
+    kw: Dict[str, Any] = {"request_timeout": 30}
+
+    if elasticsearch_url.startswith("https://"):
+        kw["ca_certs"] = CA_CERTS
+
+    if "PYTHON_CONNECTION_CLASS" in os.environ:
+        kw["node_class"] = os.environ["PYTHON_CONNECTION_CLASS"]
+
+    kw.update(kwargs)
+    client = Elasticsearch(elasticsearch_url, **kw)
+
+    # wait for yellow status
+    for tries_left in range(100 if wait else 1, 0, -1):
+        try:
+            client.cluster.health(wait_for_status="yellow")
+            return client
+        except ConnectionError:
+            if wait and tries_left == 1:
+                raise
+            time.sleep(0.1)
+
+    raise SkipTest("Elasticsearch failed to start.")
+
+
+async def get_async_test_client(
+    elasticsearch_url, wait: bool = True, **kwargs: Any
+) -> AsyncElasticsearch:
+    # construct kwargs from the environment
+    kw: Dict[str, Any] = {"request_timeout": 30}
+
+    if elasticsearch_url.startswith("https://"):
+        kw["ca_certs"] = CA_CERTS
+
+    # httpx is the only HTTP client that supports trio
+    if sniffio.current_async_library() == "trio":
+        kw["node_class"] = "httpxasync"
+
+    kw.update(kwargs)
+    client = AsyncElasticsearch(elasticsearch_url, **kw)
+
+    # wait for yellow status
+    for tries_left in range(100 if wait else 1, 0, -1):
+        try:
+            await client.cluster.health(wait_for_status="yellow")
+            return client
+        except ConnectionError:
+            if wait and tries_left == 1:
+                raise
+            await anyio.sleep(0.1)
+
+    await client.close()
+    raise SkipTest("Elasticsearch failed to start.")
+
+
+def _get_version(version_string: str) -> Tuple[int, ...]:
+    if "." not in version_string:
+        return ()
+    version = version_string.strip().split(".")
+    return tuple(int(v) if v.isdigit() else 999 for v in version)
+
+
+@pytest.fixture
+def client(elasticsearch_url) -> Generator[Elasticsearch, None, None]:
+    try:
+        connection = get_test_client(
+            elasticsearch_url, wait="WAIT_FOR_ES" in os.environ
+        )
+        add_connection("default", connection)
+        yield connection
+        wipe_cluster(connection)
+        connection.close()
+    except SkipTest:
+        pytest.skip()
+
+
+@pytest.fixture
+async def async_client(elasticsearch_url) -> AsyncGenerator[AsyncElasticsearch, None]:
+    try:
+        connection = await get_async_test_client(
+            elasticsearch_url, wait="WAIT_FOR_ES" in os.environ
+        )
+        add_async_connection("default", connection)
+        yield connection
+        wipe_cluster(connection)
+        await connection.close()
+    except SkipTest:
+        pytest.skip()
+
+
+@pytest.fixture
+def es_version(client: Elasticsearch) -> Generator[Tuple[int, ...], None, None]:
+    info = client.info()
+    yield tuple(
+        int(x)
+        for x in re.match(r"^([0-9.]+)", info["version"]["number"]).group(1).split(".")  # type: ignore
+    )
+
+
+@pytest.fixture
+def write_client(client: Elasticsearch) -> Generator[Elasticsearch, None, None]:
+    yield client
+    for index_name in client.indices.get(index="test-*", expand_wildcards="all"):
+        client.indices.delete(index=index_name)
+    client.options(ignore_status=404).indices.delete_template(name="test-template")
+    client.options(ignore_status=404).indices.delete_index_template(
+        name="test-template"
+    )
+
+
+@pytest.fixture
+async def async_write_client(
+    write_client: Elasticsearch, async_client: AsyncElasticsearch
+) -> AsyncGenerator[AsyncElasticsearch, None]:
+    yield async_client
+
+
+@pytest.fixture
+def mock_client(
+    dummy_response: ObjectApiResponse[Any],
+) -> Generator[Elasticsearch, None, None]:
+    client = Mock()
+    client.search.return_value = dummy_response
+    client.update_by_query.return_value = dummy_response
+    add_connection("mock", client)
+
+    yield client
+    connections._conns = {}
+    connections._kwargs = {}
+
+
+@pytest.fixture
+def async_mock_client(
+    dummy_response: ObjectApiResponse[Any],
+) -> Generator[Elasticsearch, None, None]:
+    client = Mock()
+    client.search = AsyncMock(return_value=dummy_response)
+    client.indices = AsyncMock()
+    client.update_by_query = AsyncMock()
+    client.delete_by_query = AsyncMock()
+    add_async_connection("mock", client)
+
+    yield client
+    async_connections._conns = {}
+    async_connections._kwargs = {}
+
+
+@pytest.fixture
+def data_client(client: Elasticsearch) -> Generator[Elasticsearch, None, None]:
+    # create mappings
+    create_git_index(client, "git")
+    create_flat_git_index(client, "flat-git")
+    # load data
+    bulk(client, DATA, raise_on_error=True, refresh=True)
+    bulk(client, FLAT_DATA, raise_on_error=True, refresh=True)
+    yield client
+    client.options(ignore_status=404).indices.delete(index="git")
+    client.options(ignore_status=404).indices.delete(index="flat-git")
+
+
+@pytest.fixture
+async def async_data_client(
+    data_client: Elasticsearch, async_client: AsyncElasticsearch
+) -> AsyncGenerator[AsyncElasticsearch, None]:
+    yield async_client
+
+
+@pytest.fixture
+def dummy_response() -> ObjectApiResponse[Any]:
+    return ObjectApiResponse(
+        meta=None,
+        body={
+            "_shards": {"failed": 0, "successful": 10, "total": 10},
+            "hits": {
+                "hits": [
+                    {
+                        "_index": "test-index",
+                        "_type": "company",
+                        "_id": "elasticsearch",
+                        "_score": 12.0,
+                        "_source": {"city": "Amsterdam", "name": "Elasticsearch"},
+                    },
+                    {
+                        "_index": "test-index",
+                        "_type": "employee",
+                        "_id": "42",
+                        "_score": 11.123,
+                        "_routing": "elasticsearch",
+                        "_source": {
+                            "name": {"first": "Shay", "last": "Bannon"},
+                            "lang": "java",
+                            "twitter": "kimchy",
+                        },
+                    },
+                    {
+                        "_index": "test-index",
+                        "_type": "employee",
+                        "_id": "47",
+                        "_score": 1,
+                        "_routing": "elasticsearch",
+                        "_source": {
+                            "name": {"first": "Honza", "last": "Král"},
+                            "lang": "python",
+                            "twitter": "honzakral",
+                        },
+                    },
+                    {
+                        "_index": "test-index",
+                        "_type": "employee",
+                        "_id": "53",
+                        "_score": 16.0,
+                        "_routing": "elasticsearch",
+                    },
+                ],
+                "max_score": 12.0,
+                "total": 123,
+            },
+            "timed_out": False,
+            "took": 123,
+        },
+    )
+
+
+@pytest.fixture
+def aggs_search() -> Search:
+    s = Search(index="flat-git")
+    s.aggs.bucket("popular_files", "terms", field="files", size=2).metric(
+        "line_stats", "stats", field="stats.lines"
+    ).metric("top_commits", "top_hits", size=2, _source=["stats.*", "committed_date"])
+    s.aggs.bucket(
+        "per_month", "date_histogram", interval="month", field="info.committed_date"
+    )
+    s.aggs.metric("sum_lines", "sum", field="stats.lines")
+    return s
+
+
+@pytest.fixture
+def aggs_data() -> Dict[str, Any]:
+    return {
+        "took": 4,
+        "timed_out": False,
+        "_shards": {"total": 1, "successful": 1, "failed": 0},
+        "hits": {"total": 52, "hits": [], "max_score": 0.0},
+        "aggregations": {
+            "sum_lines": {"value": 25052.0},
+            "per_month": {
+                "buckets": [
+                    {
+                        "doc_count": 38,
+                        "key": 1393632000000,
+                        "key_as_string": "2014-03-01T00:00:00.000Z",
+                    },
+                    {
+                        "doc_count": 11,
+                        "key": 1396310400000,
+                        "key_as_string": "2014-04-01T00:00:00.000Z",
+                    },
+                    {
+                        "doc_count": 3,
+                        "key": 1398902400000,
+                        "key_as_string": "2014-05-01T00:00:00.000Z",
+                    },
+                ]
+            },
+            "popular_files": {
+                "buckets": [
+                    {
+                        "key": "elasticsearch_dsl",
+                        "line_stats": {
+                            "count": 40,
+                            "max": 228.0,
+                            "min": 2.0,
+                            "sum": 2151.0,
+                            "avg": 53.775,
+                        },
+                        "doc_count": 40,
+                        "top_commits": {
+                            "hits": {
+                                "total": 40,
+                                "hits": [
+                                    {
+                                        "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037",
+                                        "_type": "doc",
+                                        "_source": {
+                                            "stats": {
+                                                "files": 4,
+                                                "deletions": 7,
+                                                "lines": 30,
+                                                "insertions": 23,
+                                            },
+                                            "committed_date": "2014-05-02T13:47:19",
+                                        },
+                                        "_score": 1.0,
+                                        "_index": "flat-git",
+                                    },
+                                    {
+                                        "_id": "eb3e543323f189fd7b698e66295427204fff5755",
+                                        "_type": "doc",
+                                        "_source": {
+                                            "stats": {
+                                                "files": 1,
+                                                "deletions": 0,
+                                                "lines": 18,
+                                                "insertions": 18,
+                                            },
+                                            "committed_date": "2014-05-01T13:32:14",
+                                        },
+                                        "_score": 1.0,
+                                        "_index": "flat-git",
+                                    },
+                                ],
+                                "max_score": 1.0,
+                            }
+                        },
+                    },
+                    {
+                        "key": "test_elasticsearch_dsl",
+                        "line_stats": {
+                            "count": 35,
+                            "max": 228.0,
+                            "min": 2.0,
+                            "sum": 1939.0,
+                            "avg": 55.4,
+                        },
+                        "doc_count": 35,
+                        "top_commits": {
+                            "hits": {
+                                "total": 35,
+                                "hits": [
+                                    {
+                                        "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037",
+                                        "_type": "doc",
+                                        "_source": {
+                                            "stats": {
+                                                "files": 4,
+                                                "deletions": 7,
+                                                "lines": 30,
+                                                "insertions": 23,
+                                            },
+                                            "committed_date": "2014-05-02T13:47:19",
+                                        },
+                                        "_score": 1.0,
+                                        "_index": "flat-git",
+                                    },
+                                    {
+                                        "_id": "dd15b6ba17dd9ba16363a51f85b31f66f1fb1157",
+                                        "_type": "doc",
+                                        "_source": {
+                                            "stats": {
+                                                "files": 3,
+                                                "deletions": 18,
+                                                "lines": 62,
+                                                "insertions": 44,
+                                            },
+                                            "committed_date": "2014-05-01T13:30:44",
+                                        },
+                                        "_score": 1.0,
+                                        "_index": "flat-git",
+                                    },
+                                ],
+                                "max_score": 1.0,
+                            }
+                        },
+                    },
+                ],
+                "doc_count_error_upper_bound": 0,
+                "sum_other_doc_count": 120,
+            },
+        },
+    }
+
+
+def make_pr(pr_module: Any) -> Any:
+    return pr_module.PullRequest(
+        _id=42,
+        comments=[
+            pr_module.Comment(
+                content="Hello World!",
+                author=pr_module.User(name="honzakral"),
+                created_at=datetime(2018, 1, 9, 10, 17, 3, 21184),
+                history=[
+                    pr_module.History(
+                        timestamp=datetime(2012, 1, 1),
+                        diff="-Ahoj Svete!\n+Hello World!",
+                    )
+                ],
+            ),
+        ],
+        created_at=datetime(2018, 1, 9, 9, 17, 3, 21184),
+    )
+
+
+@pytest.fixture
+def pull_request(write_client: Elasticsearch) -> sync_document.PullRequest:
+    sync_document.PullRequest.init()
+    pr = cast(sync_document.PullRequest, make_pr(sync_document))
+    pr.save(refresh=True)
+    return pr
+
+
+@pytest.fixture
+async def async_pull_request(
+    async_write_client: AsyncElasticsearch,
+) -> async_document.PullRequest:
+    await async_document.PullRequest.init()
+    pr = cast(async_document.PullRequest, make_pr(async_document))
+    await pr.save(refresh=True)
+    return pr
+
+
+@pytest.fixture
+def setup_ubq_tests(client: Elasticsearch) -> str:
+    index = "test-git"
+    create_git_index(client, index)
+    bulk(client, TEST_GIT_DATA, raise_on_error=True, refresh=True)
+    return index
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/sleep.py 9.2.0-1/test_elasticsearch/test_dsl/sleep.py
--- 8.17.2-2/test_elasticsearch/test_dsl/sleep.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/sleep.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,24 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import time
+from typing import Union
+
+
+def sleep(secs: Union[int, float]) -> None:
+    """Tests can use this function to sleep."""
+    time.sleep(secs)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_aggs.py 9.2.0-1/test_elasticsearch/test_dsl/test_aggs.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_aggs.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_aggs.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,530 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from pytest import raises
+
+from elasticsearch.dsl import aggs, query, types
+
+
+def test_repr() -> None:
+    max_score = aggs.Max(field="score")
+    a = aggs.A("terms", field="tags", aggs={"max_score": max_score})
+
+    assert "Terms(aggs={'max_score': Max(field='score')}, field='tags')" == repr(a)
+
+
+def test_meta() -> None:
+    max_score = aggs.Max(field="score")
+    a = aggs.A(
+        "terms", field="tags", aggs={"max_score": max_score}, meta={"some": "metadata"}
+    )
+
+    assert {
+        "terms": {"field": "tags"},
+        "aggs": {"max_score": {"max": {"field": "score"}}},
+        "meta": {"some": "metadata"},
+    } == a.to_dict()
+
+
+def test_meta_from_dict() -> None:
+    max_score = aggs.Max(field="score")
+    a = aggs.A(
+        "terms", field="tags", aggs={"max_score": max_score}, meta={"some": "metadata"}
+    )
+
+    assert aggs.A(a.to_dict()) == a
+
+
+def test_A_creates_proper_agg() -> None:
+    a = aggs.A("terms", field="tags")
+
+    assert isinstance(a, aggs.Terms)
+    assert a._params == {"field": "tags"}
+
+
+def test_A_handles_nested_aggs_properly() -> None:
+    max_score = aggs.Max(field="score")
+    a = aggs.A("terms", field="tags", aggs={"max_score": max_score})
+
+    assert isinstance(a, aggs.Terms)
+    assert a._params == {"field": "tags", "aggs": {"max_score": max_score}}
+
+
+def test_A_passes_aggs_through() -> None:
+    a = aggs.A("terms", field="tags")
+    assert aggs.A(a) is a
+
+
+def test_A_from_dict() -> None:
+    d = {
+        "terms": {"field": "tags"},
+        "aggs": {"per_author": {"terms": {"field": "author.raw"}}},
+    }
+    a = aggs.A(d)
+
+    assert isinstance(a, aggs.Terms)
+    assert a._params == {
+        "field": "tags",
+        "aggs": {"per_author": aggs.A("terms", field="author.raw")},
+    }
+    assert a["per_author"] == aggs.A("terms", field="author.raw")
+    assert a.aggs.per_author == aggs.A("terms", field="author.raw")  # type: ignore[attr-defined]
+
+
+def test_A_fails_with_incorrect_dict() -> None:
+    correct_d = {
+        "terms": {"field": "tags"},
+        "aggs": {"per_author": {"terms": {"field": "author.raw"}}},
+    }
+
+    with raises(Exception):
+        aggs.A(correct_d, field="f")
+
+    d = correct_d.copy()
+    del d["terms"]
+    with raises(Exception):
+        aggs.A(d)
+
+    d = correct_d.copy()
+    d["xx"] = {}
+    with raises(Exception):
+        aggs.A(d)
+
+
+def test_A_fails_with_agg_and_params() -> None:
+    a = aggs.A("terms", field="tags")
+
+    with raises(Exception):
+        aggs.A(a, field="score")
+
+
+def test_buckets_are_nestable() -> None:
+    a = aggs.Terms(field="tags")
+    b = a.bucket("per_author", "terms", field="author.raw")
+
+    assert isinstance(b, aggs.Terms)
+    assert b._params == {"field": "author.raw"}
+    assert a.aggs == {"per_author": b}
+
+
+def test_metric_inside_buckets() -> None:
+    a = aggs.Terms(field="tags")
+    b = a.metric("max_score", "max", field="score")
+
+    # returns bucket so it's chainable
+    assert a is b
+    assert a.aggs["max_score"] == aggs.Max(field="score")
+
+
+def test_buckets_equals_counts_subaggs() -> None:
+    a = aggs.Terms(field="tags")
+    a.bucket("per_author", "terms", field="author.raw")
+    b = aggs.Terms(field="tags")
+
+    assert a != b
+
+
+def test_buckets_to_dict() -> None:
+    a = aggs.Terms(field="tags")
+    a.bucket("per_author", "terms", field="author.raw")
+
+    assert {
+        "terms": {"field": "tags"},
+        "aggs": {"per_author": {"terms": {"field": "author.raw"}}},
+    } == a.to_dict()
+
+    a = aggs.Terms(field="tags")
+    a.metric("max_score", "max", field="score")
+
+    assert {
+        "terms": {"field": "tags"},
+        "aggs": {"max_score": {"max": {"field": "score"}}},
+    } == a.to_dict()
+
+
+def test_nested_buckets_are_reachable_as_getitem() -> None:
+    a = aggs.Terms(field="tags")
+    b = a.bucket("per_author", "terms", field="author.raw")
+
+    assert a["per_author"] is not b
+    assert a["per_author"] == b
+
+
+def test_nested_buckets_are_settable_as_getitem() -> None:
+    a = aggs.Terms(field="tags")
+    b = a["per_author"] = aggs.A("terms", field="author.raw")
+
+    assert a.aggs["per_author"] is b
+
+
+def test_filter_can_be_instantiated_using_positional_args() -> None:
+    a = aggs.Filter(query.Q("term", f=42))
+
+    assert {"filter": {"term": {"f": 42}}} == a.to_dict()
+
+    assert a == aggs.A("filter", query.Q("term", f=42))
+
+
+def test_filter_aggregation_as_nested_agg() -> None:
+    a = aggs.Terms(field="tags")
+    a.bucket("filtered", "filter", query.Q("term", f=42))
+
+    assert {
+        "terms": {"field": "tags"},
+        "aggs": {"filtered": {"filter": {"term": {"f": 42}}}},
+    } == a.to_dict()
+
+
+def test_filter_aggregation_with_nested_aggs() -> None:
+    a = aggs.Filter(query.Q("term", f=42))
+    a.bucket("testing", "terms", field="tags")
+
+    assert {
+        "filter": {"term": {"f": 42}},
+        "aggs": {"testing": {"terms": {"field": "tags"}}},
+    } == a.to_dict()
+
+
+def test_filters_correctly_identifies_the_hash() -> None:
+    a = aggs.A(
+        "filters",
+        filters={
+            "group_a": {"term": {"group": "a"}},
+            "group_b": {"term": {"group": "b"}},
+        },
+    )
+
+    assert {
+        "filters": {
+            "filters": {
+                "group_a": {"term": {"group": "a"}},
+                "group_b": {"term": {"group": "b"}},
+            }
+        }
+    } == a.to_dict()
+    assert a.filters.group_a == query.Q("term", group="a")
+
+
+def test_bucket_sort_agg() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    bucket_sort_agg = aggs.BucketSort(sort=[{"total_sales": {"order": "desc"}}], size=3)  # type: ignore
+    assert bucket_sort_agg.to_dict() == {
+        "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3}
+    }
+    bucket_sort_agg = aggs.BucketSort(
+        sort=[types.SortOptions("total_sales", types.FieldSort(order="desc"))], size=3
+    )
+    assert bucket_sort_agg.to_dict() == {
+        "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3}
+    }
+
+    a = aggs.DateHistogram(field="date", interval="month")
+    a.bucket("total_sales", "sum", field="price")
+    a.bucket(
+        "sales_bucket_sort",
+        "bucket_sort",
+        sort=[{"total_sales": {"order": "desc"}}],
+        size=3,
+    )
+    assert {
+        "date_histogram": {"field": "date", "interval": "month"},
+        "aggs": {
+            "total_sales": {"sum": {"field": "price"}},
+            "sales_bucket_sort": {
+                "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3}
+            },
+        },
+    } == a.to_dict()
+
+
+def test_bucket_sort_agg_only_trunc() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    bucket_sort_agg = aggs.BucketSort(**{"from": 1, "size": 1, "_expand__to_dot": False})  # type: ignore
+    assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}}
+    bucket_sort_agg = aggs.BucketSort(from_=1, size=1, _expand__to_dot=False)
+    assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}}
+
+    a = aggs.DateHistogram(field="date", interval="month")
+    a.bucket("bucket_truncate", "bucket_sort", **{"from": 1, "size": 1})
+    assert {
+        "date_histogram": {"field": "date", "interval": "month"},
+        "aggs": {"bucket_truncate": {"bucket_sort": {"from": 1, "size": 1}}},
+    } == a.to_dict()
+
+
+def test_geohash_grid_aggregation() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    a = aggs.GeohashGrid(**{"field": "centroid", "precision": 3})  # type: ignore
+    assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+    a = aggs.GeohashGrid(field="centroid", precision=3)
+    assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+
+
+def test_geohex_grid_aggregation() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    a = aggs.GeohexGrid(**{"field": "centroid", "precision": 3})  # type: ignore
+    assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+    a = aggs.GeohexGrid(field="centroid", precision=3)
+    assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+
+
+def test_geotile_grid_aggregation() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    a = aggs.GeotileGrid(**{"field": "centroid", "precision": 3})  # type: ignore
+    assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+    a = aggs.GeotileGrid(field="centroid", precision=3)
+    assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict()
+
+
+def test_boxplot_aggregation() -> None:
+    a = aggs.Boxplot(field="load_time")
+
+    assert {"boxplot": {"field": "load_time"}} == a.to_dict()
+
+
+def test_rare_terms_aggregation() -> None:
+    a = aggs.RareTerms(field="the-field")
+    a.bucket("total_sales", "sum", field="price")
+    a.bucket(
+        "sales_bucket_sort",
+        "bucket_sort",
+        sort=[{"total_sales": {"order": "desc"}}],
+        size=3,
+    )
+
+    assert {
+        "aggs": {
+            "sales_bucket_sort": {
+                "bucket_sort": {"size": 3, "sort": [{"total_sales": {"order": "desc"}}]}
+            },
+            "total_sales": {"sum": {"field": "price"}},
+        },
+        "rare_terms": {"field": "the-field"},
+    } == a.to_dict()
+
+
+def test_variable_width_histogram_aggregation() -> None:
+    a = aggs.VariableWidthHistogram(field="price", buckets=2)
+    assert {"variable_width_histogram": {"buckets": 2, "field": "price"}} == a.to_dict()
+
+
+def test_ip_prefix_aggregation() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    a = aggs.IPPrefix(**{"field": "ipv4", "prefix_length": 24})  # type: ignore
+    assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict()
+    a = aggs.IPPrefix(field="ipv4", prefix_length=24)
+    assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict()
+
+
+def test_ip_prefix_aggregation_extra() -> None:
+    a = aggs.IPPrefix(field="ipv6", prefix_length=64, is_ipv6=True)
+
+    assert {
+        "ip_prefix": {
+            "field": "ipv6",
+            "prefix_length": 64,
+            "is_ipv6": True,
+        },
+    } == a.to_dict()
+
+
+def test_multi_terms_aggregation() -> None:
+    a = aggs.MultiTerms(terms=[{"field": "tags"}, {"field": "author.row"}])
+    assert {
+        "multi_terms": {
+            "terms": [
+                {"field": "tags"},
+                {"field": "author.row"},
+            ]
+        }
+    } == a.to_dict()
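+    # the same aggregation built from typed MultiTermLookup objects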
+    a = aggs.MultiTerms(
+        terms=[
+            types.MultiTermLookup(field="tags"),
+            types.MultiTermLookup(field="author.row"),
+        ]
+    )
+    assert {
+        "multi_terms": {
+            "terms": [
+                {"field": "tags"},
+                {"field": "author.row"},
+            ]
+        }
+    } == a.to_dict()
+
+
+def test_categorize_text_aggregation() -> None:
+    a = aggs.CategorizeText(
+        field="tags",
+        categorization_filters=["\\w+\\_\\d{3}"],
+        max_matched_tokens=2,
+        similarity_threshold=30,
+    )
+    assert {
+        "categorize_text": {
+            "field": "tags",
+            "categorization_filters": ["\\w+\\_\\d{3}"],
+            "max_matched_tokens": 2,
+            "similarity_threshold": 30,
+        }
+    } == a.to_dict()
+
+
+def test_median_absolute_deviation_aggregation() -> None:
+    a = aggs.MedianAbsoluteDeviation(field="rating")
+
+    assert {"median_absolute_deviation": {"field": "rating"}} == a.to_dict()
+
+
+def test_t_test_aggregation() -> None:
+    a = aggs.TTest(
+        a={"field": "startup_time_before"},
+        b={"field": "startup_time_after"},
+        type="paired",
+    )
+
+    assert {
+        "t_test": {
+            "a": {"field": "startup_time_before"},
+            "b": {"field": "startup_time_after"},
+            "type": "paired",
+        }
+    } == a.to_dict()
+
+
+def test_geo_line_aggregation() -> None:
+    a = aggs.GeoLine(point={"field": "centroid"}, sort={"field": "date"})
+
+    assert {
+        "geo_line": {
+            "point": {"field": "centroid"},
+            "sort": {"field": "date"},
+        },
+    } == a.to_dict()
+
+
+def test_inference_aggregation() -> None:
+    a = aggs.Inference(model_id="model-id", buckets_path={"agg_name": "agg_name"})
+    assert {
+        "inference": {"buckets_path": {"agg_name": "agg_name"}, "model_id": "model-id"}
+    } == a.to_dict()
+
+
+def test_matrix_stats_aggregation() -> None:
+    a = aggs.MatrixStats(fields=["poverty", "income"])
+
+    assert {"matrix_stats": {"fields": ["poverty", "income"]}} == a.to_dict()
+
+
+def test_moving_percentiles_aggregation() -> None:
+    a = aggs.DateHistogram()
+    a.bucket("the_percentile", "percentiles", field="price", percents=[1.0, 99.0])
+    a.pipeline(
+        "the_movperc", "moving_percentiles", buckets_path="the_percentile", window=10
+    )
+
+    assert {
+        "aggs": {
+            "the_movperc": {
+                "moving_percentiles": {"buckets_path": "the_percentile", "window": 10}
+            },
+            "the_percentile": {
+                "percentiles": {"field": "price", "percents": [1.0, 99.0]}
+            },
+        },
+        "date_histogram": {},
+    } == a.to_dict()
+
+
+def test_normalize_aggregation() -> None:
+    a = aggs.Normalize(buckets_path="normalized", method="percent_of_sum")
+    assert {
+        "normalize": {"buckets_path": "normalized", "method": "percent_of_sum"}
+    } == a.to_dict()
+
+
+def test_random_sampler_aggregation() -> None:
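+    # metric() returns the parent aggregation, so "a" is the random_sampler
+    # with the percentiles agg nested under it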
+    a = aggs.RandomSampler(probability=0.1).metric(
+        "price_percentiles",
+        "percentiles",
+        field="price",
+    )
+
+    assert {
+        "random_sampler": {
+            "probability": 0.1,
+        },
+        "aggs": {
+            "price_percentiles": {
+                "percentiles": {"field": "price"},
+            },
+        },
+    } == a.to_dict()
+
+
+def test_adjacency_matrix_aggregation() -> None:
+    a = aggs.AdjacencyMatrix(filters={"grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}})  # type: ignore
+    assert {
+        "adjacency_matrix": {
+            "filters": {
+                "grpA": {"terms": {"accounts": ["hillary", "sidney"]}},
+                "grpB": {"terms": {"accounts": ["donald", "mitt"]}},
+                "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}},
+            }
+        }
+    } == a.to_dict()
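+    # equivalent aggregation expressed with typed query.Terms filters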
+    a = aggs.AdjacencyMatrix(
+        filters={
+            "grpA": query.Terms(accounts=["hillary", "sidney"]),
+            "grpB": query.Terms(accounts=["donald", "mitt"]),
+            "grpC": query.Terms(accounts=["vladimir", "nigel"]),
+        }
+    )
+    assert {
+        "adjacency_matrix": {
+            "filters": {
+                "grpA": {"terms": {"accounts": ["hillary", "sidney"]}},
+                "grpB": {"terms": {"accounts": ["donald", "mitt"]}},
+                "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}},
+            }
+        }
+    } == a.to_dict()
+
+
+def test_top_metrics_aggregation() -> None:
+    # test the dictionary (type ignored) and fully typed alternatives
+    a = aggs.TopMetrics(metrics={"field": "m"}, sort={"s": "desc"})  # type: ignore
+    assert {
+        "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": "desc"}}
+    } == a.to_dict()
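+    # typed variant; SortOptions serializes the sort as {"s": {"order": "desc"}}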
+    a = aggs.TopMetrics(
+        metrics=types.TopMetricsValue(field="m"),
+        sort=types.SortOptions("s", types.FieldSort(order="desc")),
+    )
+    assert {
+        "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": {"order": "desc"}}}
+    } == a.to_dict()
+
+
+def test_bucket_agg_with_filter() -> None:
+    b = aggs.Filter(query.Terms(something=[1, 2, 3]))
+
+    a = aggs.Terms(field="some_field", size=100)
+    a.bucket("b", b)
+
+    assert a.aggs["b"] == a["b"]  # a['b'] threw exception before patch #1902
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_analysis.py 9.2.0-1/test_elasticsearch/test_dsl/test_analysis.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_analysis.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_analysis.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,216 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from pytest import raises
+
+from elasticsearch.dsl import analysis
+
+
+def test_analyzer_serializes_as_name() -> None:
+    a = analysis.analyzer("my_analyzer")
+
+    assert "my_analyzer" == a.to_dict()  # type: ignore
+
+
+def test_analyzer_has_definition() -> None:
+    a = analysis.CustomAnalyzer(
+        "my_analyzer", tokenizer="keyword", filter=["lowercase"]
+    )
+
+    assert {
+        "type": "custom",
+        "tokenizer": "keyword",
+        "filter": ["lowercase"],
+    } == a.get_definition()
+
+
+def test_simple_multiplexer_filter() -> None:
+    a = analysis.analyzer(
+        "my_analyzer",
+        tokenizer="keyword",
+        filter=[
+            analysis.token_filter(
+                "my_multi", "multiplexer", filters=["lowercase", "lowercase, stop"]
+            )
+        ],
+    )
+
+    assert {
+        "analyzer": {
+            "my_analyzer": {
+                "filter": ["my_multi"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            }
+        },
+        "filter": {
+            "my_multi": {
+                "filters": ["lowercase", "lowercase, stop"],
+                "type": "multiplexer",
+            }
+        },
+    } == a.get_analysis_definition()
+
+
+def test_multiplexer_with_custom_filter() -> None:
+    a = analysis.analyzer(
+        "my_analyzer",
+        tokenizer="keyword",
+        filter=[
+            analysis.token_filter(
+                "my_multi",
+                "multiplexer",
+                filters=[
+                    [analysis.token_filter("en", "snowball", language="English")],
+                    "lowercase, stop",
+                ],
+            )
+        ],
+    )
+
+    assert {
+        "analyzer": {
+            "my_analyzer": {
+                "filter": ["my_multi"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            }
+        },
+        "filter": {
+            "en": {"type": "snowball", "language": "English"},
+            "my_multi": {"filters": ["en", "lowercase, stop"], "type": "multiplexer"},
+        },
+    } == a.get_analysis_definition()
+
+
+def test_conditional_token_filter() -> None:
+    a = analysis.analyzer(
+        "my_cond",
+        tokenizer=analysis.tokenizer("keyword"),
+        filter=[
+            analysis.token_filter(
+                "testing",
+                "condition",
+                script={"source": "return true"},
+                filter=[
+                    "lowercase",
+                    analysis.token_filter("en", "snowball", language="English"),
+                ],
+            ),
+            "stop",
+        ],
+    )
+
+    assert {
+        "analyzer": {
+            "my_cond": {
+                "filter": ["testing", "stop"],
+                "tokenizer": "keyword",
+                "type": "custom",
+            }
+        },
+        "filter": {
+            "en": {"language": "English", "type": "snowball"},
+            "testing": {
+                "script": {"source": "return true"},
+                "filter": ["lowercase", "en"],
+                "type": "condition",
+            },
+        },
+    } == a.get_analysis_definition()
+
+
+def test_conflicting_nested_filters_cause_error() -> None:
+    a = analysis.analyzer(
+        "my_cond",
+        tokenizer=analysis.tokenizer("keyword"),
+        filter=[
+            analysis.token_filter("en", "stemmer", language="english"),
+            analysis.token_filter(
+                "testing",
+                "condition",
+                script={"source": "return true"},
+                filter=[
+                    "lowercase",
+                    analysis.token_filter("en", "snowball", language="English"),
+                ],
+            ),
+        ],
+    )
+
+    with raises(ValueError):
+        a.get_analysis_definition()
+
+
+def test_normalizer_serializes_as_name() -> None:
+    n = analysis.normalizer("my_normalizer")
+
+    assert "my_normalizer" == n.to_dict()  # type: ignore
+
+
+def test_normalizer_has_definition() -> None:
+    n = analysis.CustomNormalizer(
+        "my_normalizer", filter=["lowercase", "asciifolding"], char_filter=["quote"]
+    )
+
+    assert {
+        "type": "custom",
+        "filter": ["lowercase", "asciifolding"],
+        "char_filter": ["quote"],
+    } == n.get_definition()
+
+
+def test_tokenizer() -> None:
+    t = analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3)
+
+    assert t.to_dict() == "trigram"  # type: ignore
+    assert {"type": "nGram", "min_gram": 3, "max_gram": 3} == t.get_definition()
+
+
+def test_custom_analyzer_can_collect_custom_items() -> None:
+    trigram = analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3)
+    my_stop = analysis.token_filter("my_stop", "stop", stopwords=["a", "b"])
+    umlauts = analysis.char_filter("umlauts", "pattern_replace", mappings=["ü=>ue"])
+    a = analysis.analyzer(
+        "my_analyzer",
+        tokenizer=trigram,
+        filter=["lowercase", my_stop],
+        char_filter=["html_strip", umlauts],
+    )
+
+    assert a.to_dict() == "my_analyzer"  # type: ignore
+    assert {
+        "analyzer": {
+            "my_analyzer": {
+                "type": "custom",
+                "tokenizer": "trigram",
+                "filter": ["lowercase", "my_stop"],
+                "char_filter": ["html_strip", "umlauts"],
+            }
+        },
+        "tokenizer": {"trigram": trigram.get_definition()},
+        "filter": {"my_stop": my_stop.get_definition()},
+        "char_filter": {"umlauts": umlauts.get_definition()},
+    } == a.get_analysis_definition()
+
+
+def test_stemmer_analyzer_can_pass_name() -> None:
+    t = analysis.token_filter(
+        "my_english_filter", name="minimal_english", type="stemmer"
+    )
+    assert t.to_dict() == "my_english_filter"  # type: ignore
+    assert {"type": "stemmer", "name": "minimal_english"} == t.get_definition()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_connections.py 9.2.0-1/test_elasticsearch/test_dsl/test_connections.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_connections.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_connections.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,143 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, List
+
+from pytest import raises
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import connections, serializer
+
+
+class DummyElasticsearch:
+    def __init__(self, *args: Any, hosts: List[str], **kwargs: Any):
+        self.hosts = hosts
+
+
+def test_default_connection_is_returned_by_default() -> None:
+    c = connections.Connections[object](elasticsearch_class=object)
+
+    con, con2 = object(), object()
+    c.add_connection("default", con)
+
+    c.add_connection("not-default", con2)
+
+    assert c.get_connection() is con
+
+
+def test_get_connection_creates_connection_if_needed() -> None:
+    c = connections.Connections[DummyElasticsearch](
+        elasticsearch_class=DummyElasticsearch
+    )
+    c.configure(
+        default={"hosts": ["https://es.com:9200"]},
+        local={"hosts": ["https://localhost:9200"]},
+    )
+
+    default = c.get_connection()
+    local = c.get_connection("local")
+
+    assert isinstance(default, DummyElasticsearch)
+    assert isinstance(local, DummyElasticsearch)
+
+    assert default.hosts == ["https://es.com:9200"]
+    assert local.hosts == ["https://localhost:9200"]
+
+
+def test_configure_preserves_unchanged_connections() -> None:
+    c = connections.Connections[DummyElasticsearch](
+        elasticsearch_class=DummyElasticsearch
+    )
+
+    c.configure(
+        default={"hosts": ["https://es.com:9200"]},
+        local={"hosts": ["https://localhost:9200"]},
+    )
+    default = c.get_connection()
+    local = c.get_connection("local")
+
+    c.configure(
+        default={"hosts": ["https://not-es.com:9200"]},
+        local={"hosts": ["https://localhost:9200"]},
+    )
+    new_default = c.get_connection()
+    new_local = c.get_connection("local")
+
+    assert new_local is local
+    assert new_default is not default
+
+
+def test_remove_connection_removes_both_conn_and_conf() -> None:
+    c = connections.Connections[object](elasticsearch_class=DummyElasticsearch)
+
+    c.configure(
+        default={"hosts": ["https://es.com:9200"]},
+        local={"hosts": ["https://localhost:9200"]},
+    )
+    c.add_connection("local2", object())
+
+    c.remove_connection("default")
+    c.get_connection("local2")
+    c.remove_connection("local2")
+
+    for name in ("local2", "default"):
+        with raises(Exception):
+            c.get_connection(name)
+
+
+def test_create_connection_constructs_client() -> None:
+    c = connections.Connections[DummyElasticsearch](
+        elasticsearch_class=DummyElasticsearch
+    )
+    c.create_connection("testing", hosts=["https://es.com:9200"])
+
+    con = c.get_connection("testing")
+    assert con.hosts == ["https://es.com:9200"]
+
+
+def test_create_connection_adds_our_serializer() -> None:
+    c = connections.Connections[Elasticsearch](elasticsearch_class=Elasticsearch)
+    c.create_connection("testing", hosts=["https://es.com:9200"])
+
+    c_serializers = c.get_connection("testing").transport.serializers
+    assert c_serializers.serializers["application/json"] is serializer.serializer
+
+
+def test_connection_has_correct_user_agent() -> None:
+    c = connections.Connections[Elasticsearch](elasticsearch_class=Elasticsearch)
+
+    c.create_connection("testing", hosts=["https://es.com:9200"])
+    assert (
+        c.get_connection("testing")
+        ._headers["user-agent"]
+        .startswith("elasticsearch-dsl-py/")
+    )
+
+    my_client = Elasticsearch(hosts=["http://localhost:9200"])
+    my_client = my_client.options(headers={"user-agent": "my-user-agent/1.0"})
+    c.add_connection("default", my_client)
+    assert c.get_connection()._headers["user-agent"].startswith("elasticsearch-dsl-py/")
+
+    my_client = Elasticsearch(hosts=["http://localhost:9200"])
+    assert (
+        c.get_connection(my_client)
+        ._headers["user-agent"]
+        .startswith("elasticsearch-dsl-py/")
+    )
+
+    not_a_client = object()
+    assert c.get_connection(not_a_client) == not_a_client  # type: ignore[arg-type]
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_field.py 9.2.0-1/test_elasticsearch/test_dsl/test_field.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_field.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_field.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,285 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import base64
+import ipaddress
+from datetime import date, datetime, time
+from typing import cast
+
+import pytest
+from dateutil import tz
+
+from elasticsearch import dsl
+from elasticsearch.dsl import (
+    AttrDict,
+    AttrList,
+    InnerDoc,
+    Range,
+    ValidationException,
+    field,
+)
+
+
+def test_date_range_deserialization() -> None:
+    data = {"lt": "2018-01-01T00:30:10"}
+
+    r = field.DateRange().deserialize(data)
+
+    assert isinstance(r, Range)
+    assert r.lt == datetime(2018, 1, 1, 0, 30, 10)
+
+
+def test_boolean_deserialization() -> None:
+    bf = field.Boolean()
+
+    assert not bf.deserialize("false")
+    assert not bf.deserialize(False)
+    assert not bf.deserialize("")
+    assert not bf.deserialize(0)
+
+    assert bf.deserialize(True)
+    assert bf.deserialize("true")
+    assert bf.deserialize(1)
+
+
+def test_datetime_deserialization() -> None:
+    f = field.Date()
+    dt = datetime.now()
+    assert dt == f._deserialize(dt.isoformat())
+
+    d = date.today()
+    assert datetime.combine(d, time()) == f._deserialize(d.isoformat())
+
+
+def test_date_deserialization() -> None:
+    f = field.Date(format="yyyy-MM-dd")
+    d = date.today()
+    assert d == f._deserialize(d.isoformat())
+
+    dt = datetime.now()
+    assert dt.date() == f._deserialize(dt.isoformat())
+
+
+def test_date_field_can_have_default_tz() -> None:
+    f = field.Date(default_timezone="UTC")
+    now = datetime.now()
+
+    now_with_tz = cast(datetime, f._deserialize(now))
+
+    assert now_with_tz.tzinfo == tz.gettz("UTC")
+    assert now.isoformat() + "+00:00" == now_with_tz.isoformat()
+
+    now_with_tz = cast(datetime, f._deserialize(now.isoformat()))
+
+    assert now_with_tz.tzinfo == tz.gettz("UTC")
+    assert now.isoformat() + "+00:00" == now_with_tz.isoformat()
+
+
+def test_custom_field_can_wrap_other_field() -> None:
+    class MyField(field.CustomField):
+        @property
+        def builtin_type(self) -> field.Text:
+            return field.Text(**self._params)
+
+    assert {"type": "text", "index": "not_analyzed"} == MyField(
+        index="not_analyzed"
+    ).to_dict()
+
+
+def test_field_from_dict() -> None:
+    f = field.construct_field({"type": "text", "index": "not_analyzed"})
+
+    assert isinstance(f, field.Text)
+    assert {"type": "text", "index": "not_analyzed"} == f.to_dict()
+
+
+def test_multi_fields_are_accepted_and_parsed() -> None:
+    f = field.construct_field(
+        "text",
+        fields={"raw": {"type": "keyword"}, "eng": field.Text(analyzer="english")},
+    )
+
+    assert isinstance(f, field.Text)
+    assert {
+        "type": "text",
+        "fields": {
+            "raw": {"type": "keyword"},
+            "eng": {"type": "text", "analyzer": "english"},
+        },
+    } == f.to_dict()
+
+
+def test_nested_provides_direct_access_to_its_fields() -> None:
+    f = field.Nested(properties={"name": {"type": "text", "index": "not_analyzed"}})
+
+    assert "name" in f
+    assert f["name"] == field.Text(index="not_analyzed")
+
+
+def test_field_supports_multiple_analyzers() -> None:
+    f = field.Text(analyzer="snowball", search_analyzer="keyword")
+    assert {
+        "analyzer": "snowball",
+        "search_analyzer": "keyword",
+        "type": "text",
+    } == f.to_dict()
+
+
+def test_multifield_supports_multiple_analyzers() -> None:
+    f = field.Text(
+        fields={
+            "f1": field.Text(search_analyzer="keyword", analyzer="snowball"),
+            "f2": field.Text(analyzer="keyword"),
+        }
+    )
+    assert {
+        "fields": {
+            "f1": {
+                "analyzer": "snowball",
+                "search_analyzer": "keyword",
+                "type": "text",
+            },
+            "f2": {"analyzer": "keyword", "type": "text"},
+        },
+        "type": "text",
+    } == f.to_dict()
+
+
+def test_scaled_float() -> None:
+    with pytest.raises(TypeError):
+        field.ScaledFloat()  # type: ignore
+    f = field.ScaledFloat(123)
+    assert f.to_dict() == {"scaling_factor": 123, "type": "scaled_float"}
+
+
+def test_ipaddress() -> None:
+    f = field.Ip()
+    assert f.deserialize("127.0.0.1") == ipaddress.ip_address("127.0.0.1")
+    assert f.deserialize("::1") == ipaddress.ip_address("::1")
+    assert f.serialize(f.deserialize("::1")) == "::1"
+    assert f.deserialize(None) is None
+    with pytest.raises(ValueError):
+        assert f.deserialize("not_an_ipaddress")
+
+
+def test_float() -> None:
+    f = field.Float()
+    assert f.deserialize("42") == 42.0
+    assert f.deserialize(None) is None
+    with pytest.raises(ValueError):
+        assert f.deserialize("not_a_float")
+
+
+def test_integer() -> None:
+    f = field.Integer()
+    assert f.deserialize("42") == 42
+    assert f.deserialize(None) is None
+    with pytest.raises(ValueError):
+        assert f.deserialize("not_an_integer")
+
+
+def test_binary() -> None:
+    f = field.Binary()
+    assert f.deserialize(base64.b64encode(b"42")) == b"42"
+    assert f.deserialize(f.serialize(b"42")) == b"42"
+    assert f.deserialize(None) is None
+
+
+def test_constant_keyword() -> None:
+    f = field.ConstantKeyword()
+    assert f.to_dict() == {"type": "constant_keyword"}
+
+
+def test_rank_features() -> None:
+    f = field.RankFeatures()
+    assert f.to_dict() == {"type": "rank_features"}
+
+
+def test_object_dynamic_values() -> None:
+    f = field.Object(dynamic=True)
+    assert f.to_dict()["dynamic"] is True
+    f = field.Object(dynamic=False)
+    assert f.to_dict()["dynamic"] is False
+    f = field.Object(dynamic="strict")
+    assert f.to_dict()["dynamic"] == "strict"
+
+
+def test_object_disabled() -> None:
+    f = field.Object(enabled=False)
+    assert f.to_dict() == {"type": "object", "enabled": False}
+
+
+def test_object_constructor() -> None:
+    expected = {"type": "object", "properties": {"inner_int": {"type": "integer"}}}
+
+    class Inner(InnerDoc):
+        inner_int = field.Integer()
+
+    obj_from_doc = field.Object(doc_class=Inner)
+    assert obj_from_doc.to_dict() == expected
+
+    obj_from_props = field.Object(properties={"inner_int": field.Integer()})
+    assert obj_from_props.to_dict() == expected
+
+    with pytest.raises(ValidationException):
+        field.Object(doc_class=Inner, properties={"inner_int": field.Integer()})
+
+    with pytest.raises(ValidationException):
+        field.Object(doc_class=Inner, dynamic=False)
+
+
+def test_dynamic_object() -> None:
+    f = field.Object(dynamic=True)
+    assert f.deserialize({"a": "b"}).to_dict() == {"a": "b"}
+    assert f.deserialize(AttrDict({"a": "b"})).to_dict() == {"a": "b"}
+    assert f.serialize({"a": "b"}) == {"a": "b"}
+    assert f.serialize(AttrDict({"a": "b"})) == {"a": "b"}
+
+
+def test_dynamic_nested() -> None:
+    f = field.Nested(dynamic=True)
+    assert f.deserialize([{"a": "b"}, {"c": "d"}]) == [{"a": "b"}, {"c": "d"}]
+    assert f.deserialize([AttrDict({"a": "b"}), {"c": "d"}]) == [
+        {"a": "b"},
+        {"c": "d"},
+    ]
+    assert f.deserialize(AttrList([AttrDict({"a": "b"}), {"c": "d"}])) == [
+        {"a": "b"},
+        {"c": "d"},
+    ]
+    assert f.serialize([{"a": "b"}, {"c": "d"}]) == [{"a": "b"}, {"c": "d"}]
+    assert f.serialize([AttrDict({"a": "b"}), {"c": "d"}]) == [{"a": "b"}, {"c": "d"}]
+    assert f.serialize(AttrList([AttrDict({"a": "b"}), {"c": "d"}])) == [
+        {"a": "b"},
+        {"c": "d"},
+    ]
+
+
+def test_all_fields_exported() -> None:
+    """Make sure that all the generated field classes are exported at the top-level"""
+    fields = [
+        f
+        for f in dir(field)
+        if isinstance(getattr(field, f), type)
+        and issubclass(getattr(field, f), field.Field)
+    ]
+    exported = dir(dsl)
+    not_found = []
+    for f in fields:
+        if f not in exported:
+            not_found.append(f)
+    assert not_found == []
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,54 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import analyzer, token_filter, tokenizer
+
+
+@pytest.mark.anyio
+async def test_simulate_with_just_builtin_tokenizer(
+    async_client: AsyncElasticsearch,
+) -> None:
+    a = analyzer("my-analyzer", tokenizer="keyword")
+    tokens = (await a.async_simulate("Hello World!", using=async_client)).tokens
+
+    assert len(tokens) == 1
+    assert tokens[0].token == "Hello World!"
+
+
+@pytest.mark.anyio
+async def test_simulate_complex(async_client: AsyncElasticsearch) -> None:
+    a = analyzer(
+        "my-analyzer",
+        tokenizer=tokenizer("split_words", "simple_pattern_split", pattern=":"),
+        filter=["lowercase", token_filter("no-ifs", "stop", stopwords=["if"])],
+    )
+
+    tokens = (await a.async_simulate("if:this:works", using=async_client)).tokens
+
+    assert len(tokens) == 2
+    assert ["this", "works"] == [t.token for t in tokens]
+
+
+@pytest.mark.anyio
+async def test_simulate_builtin(async_client: AsyncElasticsearch) -> None:
+    a = analyzer("my-analyzer", "english")
+    tokens = (await a.async_simulate("fixes running")).tokens
+
+    assert ["fix", "run"] == [t.token for t in tokens]
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_document.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_document.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,934 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+# this file creates several documents using bad or no types because
+# these are still supported and should be kept functional in spite
+# of not having appropriate type hints. For that reason the comment
+# below disables many mypy checks that fail as a result of this.
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined"
+
+from datetime import datetime
+from ipaddress import ip_address
+from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Optional, Tuple, Union
+
+import pytest
+from pytest import raises
+from pytz import timezone
+
+from elasticsearch import AsyncElasticsearch, ConflictError, NotFoundError
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    AttrDict,
+    Binary,
+    Boolean,
+    Date,
+    DenseVector,
+    Double,
+    InnerDoc,
+    Ip,
+    Keyword,
+    Long,
+    M,
+    Mapping,
+    MetaField,
+    Nested,
+    Object,
+    Q,
+    RankFeatures,
+    Text,
+    analyzer,
+    mapped_field,
+)
+from elasticsearch.dsl.query import Match
+from elasticsearch.dsl.types import MatchQuery
+from elasticsearch.dsl.utils import AttrList
+from elasticsearch.helpers.errors import BulkIndexError
+
+snowball = analyzer("my_snow", tokenizer="standard", filter=["lowercase", "snowball"])
+
+
+class User(InnerDoc):
+    name = Text(fields={"raw": Keyword()})
+
+
+class Wiki(AsyncDocument):
+    owner = Object(User)
+    views = Long()
+    ranked = RankFeatures()
+
+    class Index:
+        name = "test-wiki"
+
+
+class Repository(AsyncDocument):
+    owner = Object(User)
+    created_at = Date()
+    description = Text(analyzer=snowball)
+    tags = Keyword()
+
+    @classmethod
+    def search(cls) -> AsyncSearch["Repository"]:  # type: ignore[override]
+        return super().search().filter("term", commit_repo="repo")
+
+    class Index:
+        name = "git"
+
+
+class Commit(AsyncDocument):
+    committed_date = Date()
+    authored_date = Date()
+    description = Text(analyzer=snowball)
+
+    class Index:
+        name = "flat-git"
+
+    class Meta:
+        mapping = Mapping()
+
+
+class History(InnerDoc):
+    timestamp = Date()
+    diff = Text()
+
+
+class Comment(InnerDoc):
+    content = Text()
+    created_at = Date()
+    author = Object(User)
+    history = Nested(History)
+
+    class Meta:
+        dynamic = MetaField(False)
+
+
+class PullRequest(AsyncDocument):
+    comments = Nested(Comment)
+    created_at = Date()
+
+    class Index:
+        name = "test-prs"
+
+
+class SerializationDoc(AsyncDocument):
+    i = Long()
+    b = Boolean()
+    d = Double()
+    bin = Binary()
+    ip = Ip()
+
+    class Index:
+        name = "test-serialization"
+
+
+class Tags(AsyncDocument):
+    tags = Keyword(multi=True)
+
+    class Index:
+        name = "tags"
+
+
+@pytest.mark.anyio
+async def test_serialization(async_write_client: AsyncElasticsearch) -> None:
+    await SerializationDoc.init()
+    await async_write_client.index(
+        index="test-serialization",
+        id=42,
+        body={
+            "i": [1, 2, "3", None],
+            "b": [True, False, "true", "false", None],
+            "d": [0.1, "-0.1", None],
+            "bin": ["SGVsbG8gV29ybGQ=", None],
+            "ip": ["::1", "127.0.0.1", None],
+        },
+    )
+    sd = await SerializationDoc.get(id=42)
+    assert sd is not None
+
+    assert sd.i == [1, 2, 3, None]
+    assert sd.b == [True, False, True, False, None]
+    assert sd.d == [0.1, -0.1, None]
+    assert sd.bin == [b"Hello World", None]
+    assert sd.ip == [ip_address("::1"), ip_address("127.0.0.1"), None]
+
+    assert sd.to_dict() == {
+        "b": [True, False, True, False, None],
+        "bin": ["SGVsbG8gV29ybGQ=", None],
+        "d": [0.1, -0.1, None],
+        "i": [1, 2, 3, None],
+        "ip": ["::1", "127.0.0.1", None],
+    }
+
+
+@pytest.mark.anyio
+async def test_nested_inner_hits_are_wrapped_properly(async_pull_request: Any) -> None:
+    history_query = Q(
+        "nested",
+        path="comments.history",
+        inner_hits={},
+        query=Q("match", comments__history__diff="ahoj"),
+    )
+    s = PullRequest.search().query(
+        "nested", inner_hits={}, path="comments", query=history_query
+    )
+
+    response = await s.execute()
+    pr = response.hits[0]
+    assert isinstance(pr, PullRequest)
+    assert isinstance(pr.comments[0], Comment)
+    assert isinstance(pr.comments[0].history[0], History)
+
+    comment = pr.meta.inner_hits.comments.hits[0]
+    assert isinstance(comment, Comment)
+    assert comment.author.name == "honzakral"
+    assert isinstance(comment.history[0], History)
+
+    history = comment.meta.inner_hits["comments.history"].hits[0]
+    assert isinstance(history, History)
+    assert history.timestamp == datetime(2012, 1, 1)
+    assert "score" in history.meta
+
+
+@pytest.mark.anyio
+async def test_nested_inner_hits_are_deserialized_properly(
+    async_pull_request: Any,
+) -> None:
+    s = PullRequest.search().query(
+        "nested",
+        inner_hits={},
+        path="comments",
+        query=Q("match", comments__content="hello"),
+    )
+
+    response = await s.execute()
+    pr = response.hits[0]
+    assert isinstance(pr.created_at, datetime)
+    assert isinstance(pr.comments[0], Comment)
+    assert isinstance(pr.comments[0].created_at, datetime)
+
+
+@pytest.mark.anyio
+async def test_nested_top_hits_are_wrapped_properly(async_pull_request: Any) -> None:
+    s = PullRequest.search()
+    s.aggs.bucket("comments", "nested", path="comments").metric(
+        "hits", "top_hits", size=1
+    )
+
+    r = await s.execute()
+
+    print(r._d_)
+    assert isinstance(r.aggregations.comments.hits.hits[0], Comment)
+
+
+@pytest.mark.anyio
+async def test_update_object_field(async_write_client: AsyncElasticsearch) -> None:
+    await Wiki.init()
+    w = Wiki(
+        owner=User(name="Honza Kral"),
+        _id="elasticsearch-py",
+        ranked={"test1": 0.1, "topic2": 0.2},
+    )
+    await w.save()
+
+    assert "updated" == await w.update(owner=[{"name": "Honza"}, User(name="Nick")])
+    assert w.owner[0].name == "Honza"
+    assert w.owner[1].name == "Nick"
+
+    w = await Wiki.get(id="elasticsearch-py")
+    assert w.owner[0].name == "Honza"
+    assert w.owner[1].name == "Nick"
+
+    assert w.ranked == {"test1": 0.1, "topic2": 0.2}
+
+
+@pytest.mark.anyio
+async def test_update_script(async_write_client: AsyncElasticsearch) -> None:
+    await Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    await w.save()
+
+    await w.update(script="ctx._source.views += params.inc", inc=5)
+    w = await Wiki.get(id="elasticsearch-py")
+    assert w.views == 47
+
+
+@pytest.mark.anyio
+async def test_update_script_with_dict(async_write_client: AsyncElasticsearch) -> None:
+    await Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    await w.save()
+
+    await w.update(
+        script={
+            "source": "ctx._source.views += params.inc1 + params.inc2",
+            "params": {"inc1": 2},
+            "lang": "painless",
+        },
+        inc2=3,
+    )
+    w = await Wiki.get(id="elasticsearch-py")
+    assert w.views == 47
+
+
+@pytest.mark.anyio
+async def test_update_retry_on_conflict(async_write_client: AsyncElasticsearch) -> None:
+    await Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    await w.save()
+
+    w1 = await Wiki.get(id="elasticsearch-py")
+    w2 = await Wiki.get(id="elasticsearch-py")
+    assert w1 is not None
+    assert w2 is not None
+
+    await w1.update(
+        script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1
+    )
+    await w2.update(
+        script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1
+    )
+
+    w = await Wiki.get(id="elasticsearch-py")
+    assert w.views == 52
+
+
+@pytest.mark.anyio
+@pytest.mark.parametrize("retry_on_conflict", [None, 0])
+async def test_update_conflicting_version(
+    async_write_client: AsyncElasticsearch, retry_on_conflict: bool
+) -> None:
+    await Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    await w.save()
+
+    w1 = await Wiki.get(id="elasticsearch-py")
+    w2 = await Wiki.get(id="elasticsearch-py")
+    assert w1 is not None
+    assert w2 is not None
+
+    await w1.update(script="ctx._source.views += params.inc", inc=5)
+
+    with raises(ConflictError):
+        await w2.update(
+            script="ctx._source.views += params.inc",
+            inc=5,
+            retry_on_conflict=retry_on_conflict,
+        )
+
+
+@pytest.mark.anyio
+async def test_save_and_update_return_doc_meta(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    await Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    resp = await w.save(return_doc_meta=True)
+    assert resp["_index"] == "test-wiki"
+    assert resp["result"] == "created"
+    assert set(resp.keys()) == {
+        "_id",
+        "_index",
+        "_primary_term",
+        "_seq_no",
+        "_shards",
+        "_version",
+        "result",
+    }
+
+    resp = await w.update(
+        script="ctx._source.views += params.inc", inc=5, return_doc_meta=True
+    )
+    assert resp["_index"] == "test-wiki"
+    assert resp["result"] == "updated"
+    assert set(resp.keys()) == {
+        "_id",
+        "_index",
+        "_primary_term",
+        "_seq_no",
+        "_shards",
+        "_version",
+        "result",
+    }
+
+
+@pytest.mark.anyio
+async def test_init(async_write_client: AsyncElasticsearch) -> None:
+    await Repository.init(index="test-git")
+
+    assert await async_write_client.indices.exists(index="test-git")
+
+
+@pytest.mark.anyio
+async def test_get_raises_404_on_index_missing(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    with raises(NotFoundError):
+        await Repository.get("elasticsearch-dsl-php", index="not-there")
+
+
+@pytest.mark.anyio
+async def test_get_raises_404_on_non_existent_id(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    with raises(NotFoundError):
+        await Repository.get("elasticsearch-dsl-php")
+
+
+@pytest.mark.anyio
+async def test_get_returns_none_if_404_ignored(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    assert None is await Repository.get(
+        "elasticsearch-dsl-php", using=async_data_client.options(ignore_status=404)
+    )
+
+
+@pytest.mark.anyio
+async def test_get_returns_none_if_404_ignored_and_index_doesnt_exist(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    assert None is await Repository.get(
+        "42", index="not-there", using=async_data_client.options(ignore_status=404)
+    )
+
+
+@pytest.mark.anyio
+async def test_get(async_data_client: AsyncElasticsearch) -> None:
+    elasticsearch_repo = await Repository.get("elasticsearch-dsl-py")
+
+    assert isinstance(elasticsearch_repo, Repository)
+    assert elasticsearch_repo.owner.name == "elasticsearch"
+    assert datetime(2014, 3, 3) == elasticsearch_repo.created_at
+
+
+@pytest.mark.anyio
+async def test_exists_return_true(async_data_client: AsyncElasticsearch) -> None:
+    assert await Repository.exists("elasticsearch-dsl-py")
+
+
+@pytest.mark.anyio
+async def test_exists_false(async_data_client: AsyncElasticsearch) -> None:
+    assert not await Repository.exists("elasticsearch-dsl-php")
+
+
+@pytest.mark.anyio
+async def test_get_with_tz_date(async_data_client: AsyncElasticsearch) -> None:
+    first_commit = await Commit.get(
+        id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py"
+    )
+    assert first_commit is not None
+
+    tzinfo = timezone("Europe/Prague")
+    assert (
+        tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123000))
+        == first_commit.authored_date
+    )
+
+
+@pytest.mark.anyio
+async def test_save_with_tz_date(async_data_client: AsyncElasticsearch) -> None:
+    tzinfo = timezone("Europe/Prague")
+    first_commit = await Commit.get(
+        id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py"
+    )
+    assert first_commit is not None
+
+    first_commit.committed_date = tzinfo.localize(
+        datetime(2014, 5, 2, 13, 47, 19, 123456)
+    )
+    await first_commit.save()
+
+    first_commit = await Commit.get(
+        id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py"
+    )
+    assert first_commit is not None
+
+    assert (
+        tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123456))
+        == first_commit.committed_date
+    )
+
+
+COMMIT_DOCS_WITH_MISSING = [
+    {"_id": "0"},  # Missing
+    {"_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"},  # Existing
+    {"_id": "f"},  # Missing
+    {"_id": "eb3e543323f189fd7b698e66295427204fff5755"},  # Existing
+]
+
+
+@pytest.mark.anyio
+async def test_mget(async_data_client: AsyncElasticsearch) -> None:
+    commits = await Commit.mget(COMMIT_DOCS_WITH_MISSING)
+    assert commits[0] is None
+    assert commits[1] is not None
+    assert commits[1].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"
+    assert commits[2] is None
+    assert commits[3] is not None
+    assert commits[3].meta.id == "eb3e543323f189fd7b698e66295427204fff5755"
+
+
+@pytest.mark.anyio
+async def test_mget_raises_exception_when_missing_param_is_invalid(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    with raises(ValueError):
+        await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raj")
+
+
+@pytest.mark.anyio
+async def test_mget_raises_404_when_missing_param_is_raise(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    with raises(NotFoundError):
+        await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raise")
+
+
+@pytest.mark.anyio
+async def test_mget_ignores_missing_docs_when_missing_param_is_skip(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    commits = await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="skip")
+    assert commits[0] is not None
+    assert commits[0].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"
+    assert commits[1] is not None
+    assert commits[1].meta.id == "eb3e543323f189fd7b698e66295427204fff5755"
+
+
+@pytest.mark.anyio
+async def test_update_works_from_search_response(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    elasticsearch_repo = (await Repository.search().execute())[0]
+
+    await elasticsearch_repo.update(owner={"other_name": "elastic"})
+    assert "elastic" == elasticsearch_repo.owner.other_name
+
+    new_version = await Repository.get("elasticsearch-dsl-py")
+    assert new_version is not None
+    assert "elastic" == new_version.owner.other_name
+    assert "elasticsearch" == new_version.owner.name
+
+
+@pytest.mark.anyio
+async def test_update(async_data_client: AsyncElasticsearch) -> None:
+    elasticsearch_repo = await Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+    v = elasticsearch_repo.meta.version
+
+    old_seq_no = elasticsearch_repo.meta.seq_no
+    await elasticsearch_repo.update(
+        owner={"new_name": "elastic"}, new_field="testing-update"
+    )
+
+    assert "elastic" == elasticsearch_repo.owner.new_name
+    assert "testing-update" == elasticsearch_repo.new_field
+
+    # assert version has been updated
+    assert elasticsearch_repo.meta.version == v + 1
+
+    new_version = await Repository.get("elasticsearch-dsl-py")
+    assert new_version is not None
+    assert "testing-update" == new_version.new_field
+    assert "elastic" == new_version.owner.new_name
+    assert "elasticsearch" == new_version.owner.name
+    assert "seq_no" in new_version.meta
+    assert new_version.meta.seq_no != old_seq_no
+    assert "primary_term" in new_version.meta
+
+
+@pytest.mark.anyio
+async def test_save_updates_existing_doc(async_data_client: AsyncElasticsearch) -> None:
+    elasticsearch_repo = await Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+
+    elasticsearch_repo.new_field = "testing-save"
+    old_seq_no = elasticsearch_repo.meta.seq_no
+    assert "updated" == await elasticsearch_repo.save()
+
+    new_repo = await async_data_client.get(index="git", id="elasticsearch-dsl-py")
+    assert "testing-save" == new_repo["_source"]["new_field"]
+    assert new_repo["_seq_no"] != old_seq_no
+    assert new_repo["_seq_no"] == elasticsearch_repo.meta.seq_no
+
+
+@pytest.mark.anyio
+async def test_update_empty_field(async_client: AsyncElasticsearch) -> None:
+    await Tags._index.delete(ignore_unavailable=True)
+    await Tags.init()
+    d = Tags(id="123", tags=["a", "b"])
+    await d.save(refresh=True)
+    await d.update(tags=[], refresh=True)
+    assert d.tags == []
+
+    r = await Tags.search().execute()
+    assert r.hits[0].tags == []
+
+
+@pytest.mark.anyio
+async def test_save_automatically_uses_seq_no_and_primary_term(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    elasticsearch_repo = await Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+    elasticsearch_repo.meta.seq_no += 1
+
+    with raises(ConflictError):
+        await elasticsearch_repo.save()
+
+
+@pytest.mark.anyio
+async def test_delete_automatically_uses_seq_no_and_primary_term(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    elasticsearch_repo = await Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+    elasticsearch_repo.meta.seq_no += 1
+
+    with raises(ConflictError):
+        await elasticsearch_repo.delete()
+
+
+def assert_doc_equals(expected: Any, actual: Any) -> None:
+    for f in expected:
+        assert f in actual
+        assert actual[f] == expected[f]
+
+
+@pytest.mark.anyio
+async def test_can_save_to_different_index(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    test_repo = Repository(description="testing", meta={"id": 42})
+    assert await test_repo.save(index="test-document")
+
+    assert_doc_equals(
+        {
+            "found": True,
+            "_index": "test-document",
+            "_id": "42",
+            "_source": {"description": "testing"},
+        },
+        await async_write_client.get(index="test-document", id=42),
+    )
+
+
+@pytest.mark.anyio
+@pytest.mark.parametrize("validate", (True, False))
+async def test_save_without_skip_empty_will_include_empty_fields(
+    async_write_client: AsyncElasticsearch,
+    validate: bool,
+) -> None:
+    test_repo = Repository(
+        field_1=[], field_2=None, field_3={}, owner={"name": None}, meta={"id": 42}
+    )
+    assert await test_repo.save(
+        index="test-document", skip_empty=False, validate=validate
+    )
+
+    assert_doc_equals(
+        {
+            "found": True,
+            "_index": "test-document",
+            "_id": "42",
+            "_source": {
+                "field_1": [],
+                "field_2": None,
+                "field_3": {},
+                "owner": {"name": None},
+            },
+        },
+        await async_write_client.get(index="test-document", id=42),
+    )
+
+    test_repo = Repository(owner=AttrDict({"name": None}), meta={"id": 43})
+    assert await test_repo.save(
+        index="test-document", skip_empty=False, validate=validate
+    )
+
+    assert_doc_equals(
+        {
+            "found": True,
+            "_index": "test-document",
+            "_id": "43",
+            "_source": {
+                "owner": {"name": None},
+            },
+        },
+        await async_write_client.get(index="test-document", id=43),
+    )
+
+
+@pytest.mark.anyio
+async def test_delete(async_write_client: AsyncElasticsearch) -> None:
+    await async_write_client.create(
+        index="test-document",
+        id="elasticsearch-dsl-py",
+        body={
+            "organization": "elasticsearch",
+            "created_at": "2014-03-03",
+            "owner": {"name": "elasticsearch"},
+        },
+    )
+
+    test_repo = Repository(meta={"id": "elasticsearch-dsl-py"})
+    test_repo.meta.index = "test-document"
+    await test_repo.delete()
+
+    assert not await async_write_client.exists(
+        index="test-document",
+        id="elasticsearch-dsl-py",
+    )
+
+
+@pytest.mark.anyio
+async def test_search(async_data_client: AsyncElasticsearch) -> None:
+    assert await Repository.search().count() == 1
+
+
+@pytest.mark.anyio
+async def test_search_returns_proper_doc_classes(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    result = await Repository.search().execute()
+
+    elasticsearch_repo = result.hits[0]
+
+    assert isinstance(elasticsearch_repo, Repository)
+    assert elasticsearch_repo.owner.name == "elasticsearch"
+
+
+@pytest.mark.anyio
+async def test_refresh_mapping(async_data_client: AsyncElasticsearch) -> None:
+    class Commit(AsyncDocument):
+        class Index:
+            name = "git"
+
+    await Commit._index.load_mappings()
+
+    assert "stats" in Commit._index._mapping
+    assert "committer" in Commit._index._mapping
+    assert "description" in Commit._index._mapping
+    assert "committed_date" in Commit._index._mapping
+    assert isinstance(Commit._index._mapping["committed_date"], Date)
+
+
+@pytest.mark.anyio
+async def test_highlight_in_meta(async_data_client: AsyncElasticsearch) -> None:
+    commit = (
+        await Commit.search()
+        .query("match", description="inverting")
+        .highlight("description")
+        .execute()
+    )[0]
+
+    assert isinstance(commit, Commit)
+    assert "description" in commit.meta.highlight
+    assert isinstance(commit.meta.highlight["description"], AttrList)
+    assert len(commit.meta.highlight["description"]) > 0
+
+
+@pytest.mark.anyio
+async def test_bulk(async_data_client: AsyncElasticsearch) -> None:
+    class Address(InnerDoc):
+        street: str
+        active: bool
+
+    class Doc(AsyncDocument):
+        if TYPE_CHECKING:
+            _id: int
+        name: str
+        age: int
+        languages: List[str] = mapped_field(Keyword())
+        addresses: List[Address]
+
+        class Index:
+            name = "bulk-index"
+
+    await Doc._index.delete(ignore_unavailable=True)
+    await Doc.init()
+
+    async def gen1() -> AsyncIterator[Union[Doc, Dict[str, Any]]]:
+        yield Doc(
+            name="Joe",
+            age=33,
+            languages=["en", "fr"],
+            addresses=[
+                Address(street="123 Main St", active=True),
+                Address(street="321 Park Dr.", active=False),
+            ],
+        )
+        yield Doc(name="Susan", age=20, languages=["en"])
+        yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)}
+
+    await Doc.bulk(gen1(), refresh=True)
+    docs = list(await Doc.search().execute())
+    assert len(docs) == 3
+    assert docs[0].to_dict() == {
+        "name": "Joe",
+        "age": 33,
+        "languages": [
+            "en",
+            "fr",
+        ],
+        "addresses": [
+            {
+                "active": True,
+                "street": "123 Main St",
+            },
+            {
+                "active": False,
+                "street": "321 Park Dr.",
+            },
+        ],
+    }
+    assert docs[1].to_dict() == {
+        "name": "Susan",
+        "age": 20,
+        "languages": ["en"],
+    }
+    assert docs[2].to_dict() == {
+        "name": "Sarah",
+        "age": 45,
+    }
+    assert docs[2].meta.id == "45"
+
+    async def gen2() -> AsyncIterator[Union[Doc, Dict[str, Any]]]:
+        yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)}
+
+    # a "create" action with an existing id should fail
+    with raises(BulkIndexError):
+        await Doc.bulk(gen2(), refresh=True)
+
+    async def gen3() -> AsyncIterator[Union[Doc, Dict[str, Any]]]:
+        yield Doc(_id="45", name="Sarah", age=45, languages=["es"])
+        yield {"_op_type": "delete", "_id": docs[1].meta.id}
+
+    await Doc.bulk(gen3(), refresh=True)
+    with raises(NotFoundError):
+        await Doc.get(docs[1].meta.id)
+    doc = await Doc.get("45")
+    assert doc is not None
+    assert doc.to_dict() == {
+        "name": "Sarah",
+        "age": 45,
+        "languages": ["es"],
+    }
+
+
+@pytest.mark.anyio
+async def test_legacy_dense_vector(
+    async_client: AsyncElasticsearch, es_version: Tuple[int, ...]
+) -> None:
+    if es_version >= (8, 16):
+        pytest.skip("this test is a legacy version for Elasticsearch 8.15 or older")
+
+    class Doc(AsyncDocument):
+        float_vector: List[float] = mapped_field(DenseVector(dims=3))
+
+        class Index:
+            name = "vectors"
+
+    await Doc._index.delete(ignore_unavailable=True)
+    await Doc.init()
+
+    doc = Doc(float_vector=[1.0, 1.2, 2.3])
+    await doc.save(refresh=True)
+
+    docs = await Doc.search().execute()
+    assert len(docs) == 1
+    assert docs[0].float_vector == doc.float_vector
+
+
+@pytest.mark.anyio
+async def test_dense_vector(
+    async_client: AsyncElasticsearch, es_version: Tuple[int, ...]
+) -> None:
+    if es_version < (8, 16):
+        pytest.skip("this test requires Elasticsearch 8.16 or newer")
+
+    class Doc(AsyncDocument):
+        float_vector: List[float] = mapped_field(DenseVector())
+        byte_vector: List[int] = mapped_field(DenseVector(element_type="byte"))
+        bit_vector: List[int] = mapped_field(DenseVector(element_type="bit"))
+
+        class Index:
+            name = "vectors"
+
+    await Doc._index.delete(ignore_unavailable=True)
+    await Doc.init()
+
+    doc = Doc(
+        float_vector=[1.0, 1.2, 2.3],
+        byte_vector=[12, 23, 34, 45],
+        bit_vector=[18, -43, -112],
+    )
+    await doc.save(refresh=True)
+
+    docs = await Doc.search().execute()
+    assert len(docs) == 1
+    assert [round(v, 1) for v in docs[0].float_vector] == doc.float_vector
+    assert docs[0].byte_vector == doc.byte_vector
+    assert docs[0].bit_vector == doc.bit_vector
+
+
+@pytest.mark.anyio
+async def test_copy_to(async_client: AsyncElasticsearch) -> None:
+    class Person(AsyncDocument):
+        first_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"]))
+        last_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"]))
+        birth_place: M[str] = mapped_field(Text(copy_to="all"))
+        full_name: M[Optional[str]] = mapped_field(init=False)
+        all: M[Optional[str]] = mapped_field(init=False)
+
+        class Index:
+            name = "people"
+
+    await Person._index.delete(ignore_unavailable=True)
+    await Person.init()
+
+    person = Person(first_name="Jane", last_name="Doe", birth_place="Springfield")
+    await person.save()
+    await Person._index.refresh()
+
+    match = (
+        await Person.search()
+        .query(Match(Person.full_name, MatchQuery(query="Jane")))
+        .execute()
+    )
+    assert len(match) == 1
+
+    match = (
+        await Person.search()
+        .query(Match(Person.all, MatchQuery(query="Doe")))
+        .execute()
+    )
+    assert len(match) == 1
+
+    match = (
+        await Person.search()
+        .query(Match(Person.full_name, MatchQuery(query="Springfield")))
+        .execute()
+    )
+    assert len(match) == 0
+
+    match = (
+        await Person.search()
+        .query(Match(Person.all, MatchQuery(query="Springfield")))
+        .execute()
+    )
+    assert len(match) == 1
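
The async document tests above stream documents through Doc.bulk() from an async
generator that can mix Document instances with raw action dicts. A minimal sketch of
that pattern, assuming an AsyncDocument subclass and index name like the ones in the
test (both placeholders), looks roughly like this:

    from typing import Any, AsyncIterator, Dict, Union

    from elasticsearch.dsl import AsyncDocument

    class Doc(AsyncDocument):
        name: str
        age: int

        class Index:
            name = "bulk-index"

    async def actions() -> AsyncIterator[Union[Doc, Dict[str, Any]]]:
        # plain documents default to an "index" action
        yield Doc(name="Joe", age=33)
        # raw dicts allow per-action overrides such as _op_type and _id
        yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)}

    async def run() -> None:
        await Doc.init()                         # create the index and mapping
        await Doc.bulk(actions(), refresh=True)  # stream the actions through the bulk helper
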
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,254 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch.dsl import AsyncDocument, InnerDoc, M
+from elasticsearch.esql import ESQL, E, functions
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+
+
+class Employee(AsyncDocument):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+
+async def load_db():
+    data = [
+        [
+            10000,
+            "Joseph",
+            "Wall",
+            2.2,
+            True,
+            Address(address="8875 Long Shoals Suite 441", city="Marcville, TX"),
+        ],
+        [
+            10001,
+            "Stephanie",
+            "Ward",
+            1.749,
+            True,
+            Address(address="90162 Carter Harbor Suite 099", city="Davisborough, DE"),
+        ],
+        [
+            10002,
+            "David",
+            "Keller",
+            1.872,
+            True,
+            Address(address="6697 Patrick Union Suite 797", city="Fuentesmouth, SD"),
+        ],
+        [
+            10003,
+            "Roger",
+            "Hinton",
+            1.694,
+            False,
+            Address(address="809 Kelly Mountains", city="South Megan, DE"),
+        ],
+        [
+            10004,
+            "Joshua",
+            "Garcia",
+            1.661,
+            False,
+            Address(address="718 Angela Forks", city="Port Erinland, MA"),
+        ],
+        [
+            10005,
+            "Matthew",
+            "Richards",
+            1.633,
+            False,
+            Address(address="2869 Brown Mountains", city="New Debra, NH"),
+        ],
+        [
+            10006,
+            "Maria",
+            "Luna",
+            1.893,
+            True,
+            Address(address="5861 Morgan Springs", city="Lake Daniel, WI"),
+        ],
+        [
+            10007,
+            "Angela",
+            "Navarro",
+            1.604,
+            False,
+            Address(address="2848 Allen Station", city="Saint Joseph, OR"),
+        ],
+        [
+            10008,
+            "Maria",
+            "Cannon",
+            2.079,
+            False,
+            Address(address="322 NW Johnston", city="Bakerburgh, MP"),
+        ],
+        [
+            10009,
+            "Joseph",
+            "Sutton",
+            2.025,
+            True,
+            Address(address="77 Cardinal E", city="Lakestown, IL"),
+        ],
+    ]
+    if await Employee._index.exists():
+        await Employee._index.delete()
+    await Employee.init()
+
+    for e in data:
+        employee = Employee(
+            emp_no=e[0],
+            first_name=e[1],
+            last_name=e[2],
+            height=e[3],
+            still_hired=e[4],
+            address=e[5],
+        )
+        await employee.save()
+    await Employee._index.refresh()
+
+
+@pytest.mark.anyio
+async def test_esql(async_client):
+    await load_db()
+
+    # get the full names of the employees
+    query = (
+        ESQL.from_(Employee)
+        .eval(full_name=functions.concat(Employee.first_name, " ", Employee.last_name))
+        .keep("full_name")
+        .sort("full_name")
+        .limit(10)
+    )
+    r = await async_client.esql.query(query=str(query))
+    assert r.body["values"] == [
+        ["Angela Navarro"],
+        ["David Keller"],
+        ["Joseph Sutton"],
+        ["Joseph Wall"],
+        ["Joshua Garcia"],
+        ["Maria Cannon"],
+        ["Maria Luna"],
+        ["Matthew Richards"],
+        ["Roger Hinton"],
+        ["Stephanie Ward"],
+    ]
+
+    # get the average height of all hired employees
+    query = ESQL.from_(Employee).stats(
+        avg_height=functions.round(functions.avg(Employee.height), 2).where(
+            Employee.still_hired == True  # noqa: E712
+        )
+    )
+    r = await async_client.esql.query(query=str(query))
+    assert r.body["values"] == [[1.95]]
+
+    # find employees by name using a parameter
+    query = (
+        ESQL.from_(Employee)
+        .where(Employee.first_name == E("?"))
+        .keep(Employee.last_name)
+        .sort(Employee.last_name.desc())
+    )
+    r = await async_client.esql.query(query=str(query), params=["Maria"])
+    assert r.body["values"] == [["Luna"], ["Cannon"]]
+
+
+@pytest.mark.anyio
+async def test_esql_dsl(async_client):
+    await load_db()
+
+    # get employees with first name "Maria"
+    query = (
+        Employee.esql_from()
+        .where(Employee.first_name == "Maria")
+        .sort("last_name")
+        .limit(10)
+    )
+    marias = []
+    async for emp in Employee.esql_execute(query):
+        marias.append(emp)
+    assert len(marias) == 2
+    assert marias[0].last_name == "Cannon"
+    assert marias[0].address.address == "322 NW Johnston"
+    assert marias[0].address.city == "Bakerburgh, MP"
+    assert marias[1].last_name == "Luna"
+    assert marias[1].address.address == "5861 Morgan Springs"
+    assert marias[1].address.city == "Lake Daniel, WI"
+
+    # run a query with a missing field
+    query = (
+        Employee.esql_from()
+        .where(Employee.first_name == "Maria")
+        .drop(Employee.address.city)
+        .sort("last_name")
+        .limit(10)
+    )
+    with pytest.raises(ValueError):
+        await Employee.esql_execute(query).__anext__()
+    marias = []
+    async for emp in Employee.esql_execute(query, ignore_missing_fields=True):
+        marias.append(emp)
+    assert marias[0].last_name == "Cannon"
+    assert marias[0].address.address == "322 NW Johnston"
+    assert marias[0].address.city is None
+    assert marias[1].last_name == "Luna"
+    assert marias[1].address.address == "5861 Morgan Springs"
+    assert marias[1].address.city is None
+
+    # run a query with additional calculated fields
+    query = (
+        Employee.esql_from()
+        .where(Employee.first_name == "Maria")
+        .eval(
+            full_name=functions.concat(Employee.first_name, " ", Employee.last_name),
+            height_cm=functions.to_integer(Employee.height * 100),
+        )
+        .sort("last_name")
+        .limit(10)
+    )
+    assert isinstance(await Employee.esql_execute(query).__anext__(), Employee)
+    assert isinstance(
+        await Employee.esql_execute(query, return_additional=True).__anext__(), tuple
+    )
+    marias = []
+    async for emp, extra in Employee.esql_execute(query, return_additional=True):
+        marias.append([emp, extra])
+    assert marias[0][0].last_name == "Cannon"
+    assert marias[0][0].address.address == "322 NW Johnston"
+    assert marias[0][0].address.city == "Bakerburgh, MP"
+    assert marias[0][1] == {"full_name": "Maria Cannon", "height_cm": 208}
+    assert marias[1][0].last_name == "Luna"
+    assert marias[1][0].address.address == "5861 Morgan Springs"
+    assert marias[1][0].address.city == "Lake Daniel, WI"
+    assert marias[1][1] == {"full_name": "Maria Luna", "height_cm": 189}
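
The ES|QL tests above build queries fluently with ESQL.from_() and run them either as
raw requests or through Employee.esql_execute(), which maps result rows back onto
document instances. A minimal sketch under the same assumptions (the Employee class and
"employees" index mirror the test and are placeholders):

    from elasticsearch.dsl import AsyncDocument, M
    from elasticsearch.esql import ESQL, functions

    class Employee(AsyncDocument):
        first_name: M[str]
        last_name: M[str]

        class Index:
            name = "employees"

    query = (
        ESQL.from_(Employee)
        .eval(full_name=functions.concat(Employee.first_name, " ", Employee.last_name))
        .keep("full_name")
        .sort("full_name")
        .limit(10)
    )

    async def run(client) -> None:
        # raw response: column/value arrays straight from the ES|QL endpoint
        resp = await client.esql.query(query=str(query))
        print(resp.body["values"])

        # or let the DSL map each row back onto an Employee instance
        async for emp in Employee.esql_execute(Employee.esql_from().limit(10)):
            print(emp.first_name, emp.last_name)
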
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,305 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+from typing import Tuple, Type
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import A, AsyncDocument, AsyncSearch, Boolean, Date, Keyword
+from elasticsearch.dsl.faceted_search import (
+    AsyncFacetedSearch,
+    DateHistogramFacet,
+    NestedFacet,
+    RangeFacet,
+    TermsFacet,
+)
+
+from .test_document import PullRequest
+
+
+class Repos(AsyncDocument):
+    is_public = Boolean()
+    created_at = Date()
+
+    class Index:
+        name = "git"
+
+
+class Commit(AsyncDocument):
+    files = Keyword()
+    committed_date = Date()
+
+    class Index:
+        name = "git"
+
+
+class MetricSearch(AsyncFacetedSearch):
+    index = "git"
+    doc_types = [Commit]
+
+    facets = {
+        "files": TermsFacet(field="files", metric=A("max", field="committed_date")),
+    }
+
+
+@pytest.fixture
+def commit_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]:
+    if es_version >= (7, 2):
+        interval_kwargs = {"fixed_interval": "1d"}
+    else:
+        interval_kwargs = {"interval": "day"}
+
+    class CommitSearch(AsyncFacetedSearch):
+        index = "flat-git"
+        fields = (
+            "description",
+            "files",
+        )
+
+        facets = {
+            "files": TermsFacet(field="files"),
+            "frequency": DateHistogramFacet(
+                field="authored_date", min_doc_count=1, **interval_kwargs
+            ),
+            "deletions": RangeFacet(
+                field="stats.deletions",
+                ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))],
+            ),
+        }
+
+    return CommitSearch
+
+
+@pytest.fixture
+def repo_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]:
+    interval_type = "calendar_interval" if es_version >= (7, 2) else "interval"
+
+    class RepoSearch(AsyncFacetedSearch):
+        index = "git"
+        doc_types = [Repos]
+        facets = {
+            "public": TermsFacet(field="is_public"),
+            "created": DateHistogramFacet(
+                field="created_at", **{interval_type: "month"}
+            ),
+        }
+
+        def search(self) -> AsyncSearch:
+            s = super().search()
+            return s.filter("term", commit_repo="repo")
+
+    return RepoSearch
+
+
+@pytest.fixture
+def pr_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]:
+    interval_type = "calendar_interval" if es_version >= (7, 2) else "interval"
+
+    class PRSearch(AsyncFacetedSearch):
+        index = "test-prs"
+        doc_types = [PullRequest]
+        facets = {
+            "comments": NestedFacet(
+                "comments",
+                DateHistogramFacet(
+                    field="comments.created_at", **{interval_type: "month"}
+                ),
+            )
+        }
+
+    return PRSearch
+
+
+@pytest.mark.anyio
+async def test_facet_with_custom_metric(async_data_client: AsyncElasticsearch) -> None:
+    ms = MetricSearch()
+    r = await ms.execute()
+
+    dates = [f[1] for f in r.facets.files]
+    assert dates == list(sorted(dates, reverse=True))
+    assert dates[0] == 1399038439000
+
+
+@pytest.mark.anyio
+async def test_nested_facet(
+    async_pull_request: PullRequest, pr_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    prs = pr_search_cls()
+    r = await prs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(datetime(2018, 1, 1, 0, 0), 1, False)] == r.facets.comments
+
+
+@pytest.mark.anyio
+async def test_nested_facet_with_filter(
+    async_pull_request: PullRequest, pr_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    prs = pr_search_cls(filters={"comments": datetime(2018, 1, 1, 0, 0)})
+    r = await prs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(datetime(2018, 1, 1, 0, 0), 1, True)] == r.facets.comments
+
+    prs = pr_search_cls(filters={"comments": datetime(2018, 2, 1, 0, 0)})
+    r = await prs.execute()
+    assert not r.hits
+
+
+@pytest.mark.anyio
+async def test_datehistogram_facet(
+    async_data_client: AsyncElasticsearch, repo_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    rs = repo_search_cls()
+    r = await rs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(datetime(2014, 3, 1, 0, 0), 1, False)] == r.facets.created
+
+
+@pytest.mark.anyio
+async def test_boolean_facet(
+    async_data_client: AsyncElasticsearch, repo_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    rs = repo_search_cls()
+    r = await rs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(True, 1, False)] == r.facets.public
+    value, count, selected = r.facets.public[0]
+    assert value is True
+
+
+@pytest.mark.anyio
+async def test_empty_search_finds_everything(
+    async_data_client: AsyncElasticsearch,
+    es_version: Tuple[int, ...],
+    commit_search_cls: Type[AsyncFacetedSearch],
+) -> None:
+    cs = commit_search_cls()
+    r = await cs.execute()
+
+    assert r.hits.total.value == 52  # type: ignore[attr-defined]
+    assert [
+        ("elasticsearch_dsl", 40, False),
+        ("test_elasticsearch_dsl", 35, False),
+        ("elasticsearch_dsl/query.py", 19, False),
+        ("test_elasticsearch_dsl/test_search.py", 15, False),
+        ("elasticsearch_dsl/utils.py", 14, False),
+        ("test_elasticsearch_dsl/test_query.py", 13, False),
+        ("elasticsearch_dsl/search.py", 12, False),
+        ("elasticsearch_dsl/aggs.py", 11, False),
+        ("test_elasticsearch_dsl/test_result.py", 5, False),
+        ("elasticsearch_dsl/result.py", 3, False),
+    ] == r.facets.files
+
+    assert [
+        (datetime(2014, 3, 3, 0, 0), 2, False),
+        (datetime(2014, 3, 4, 0, 0), 1, False),
+        (datetime(2014, 3, 5, 0, 0), 3, False),
+        (datetime(2014, 3, 6, 0, 0), 3, False),
+        (datetime(2014, 3, 7, 0, 0), 9, False),
+        (datetime(2014, 3, 10, 0, 0), 2, False),
+        (datetime(2014, 3, 15, 0, 0), 4, False),
+        (datetime(2014, 3, 21, 0, 0), 2, False),
+        (datetime(2014, 3, 23, 0, 0), 2, False),
+        (datetime(2014, 3, 24, 0, 0), 10, False),
+        (datetime(2014, 4, 20, 0, 0), 2, False),
+        (datetime(2014, 4, 22, 0, 0), 2, False),
+        (datetime(2014, 4, 25, 0, 0), 3, False),
+        (datetime(2014, 4, 26, 0, 0), 2, False),
+        (datetime(2014, 4, 27, 0, 0), 2, False),
+        (datetime(2014, 5, 1, 0, 0), 2, False),
+        (datetime(2014, 5, 2, 0, 0), 1, False),
+    ] == r.facets.frequency
+
+    assert [
+        ("ok", 19, False),
+        ("good", 14, False),
+        ("better", 19, False),
+    ] == r.facets.deletions
+
+
+@pytest.mark.anyio
+async def test_term_filters_are_shown_as_selected_and_data_is_filtered(
+    async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    cs = commit_search_cls(filters={"files": "test_elasticsearch_dsl"})
+
+    r = await cs.execute()
+
+    assert 35 == r.hits.total.value  # type: ignore[attr-defined]
+    assert [
+        ("elasticsearch_dsl", 40, False),
+        ("test_elasticsearch_dsl", 35, True),  # selected
+        ("elasticsearch_dsl/query.py", 19, False),
+        ("test_elasticsearch_dsl/test_search.py", 15, False),
+        ("elasticsearch_dsl/utils.py", 14, False),
+        ("test_elasticsearch_dsl/test_query.py", 13, False),
+        ("elasticsearch_dsl/search.py", 12, False),
+        ("elasticsearch_dsl/aggs.py", 11, False),
+        ("test_elasticsearch_dsl/test_result.py", 5, False),
+        ("elasticsearch_dsl/result.py", 3, False),
+    ] == r.facets.files
+
+    assert [
+        (datetime(2014, 3, 3, 0, 0), 1, False),
+        (datetime(2014, 3, 5, 0, 0), 2, False),
+        (datetime(2014, 3, 6, 0, 0), 3, False),
+        (datetime(2014, 3, 7, 0, 0), 6, False),
+        (datetime(2014, 3, 10, 0, 0), 1, False),
+        (datetime(2014, 3, 15, 0, 0), 3, False),
+        (datetime(2014, 3, 21, 0, 0), 2, False),
+        (datetime(2014, 3, 23, 0, 0), 1, False),
+        (datetime(2014, 3, 24, 0, 0), 7, False),
+        (datetime(2014, 4, 20, 0, 0), 1, False),
+        (datetime(2014, 4, 25, 0, 0), 3, False),
+        (datetime(2014, 4, 26, 0, 0), 2, False),
+        (datetime(2014, 4, 27, 0, 0), 1, False),
+        (datetime(2014, 5, 1, 0, 0), 1, False),
+        (datetime(2014, 5, 2, 0, 0), 1, False),
+    ] == r.facets.frequency
+
+    assert [
+        ("ok", 12, False),
+        ("good", 10, False),
+        ("better", 13, False),
+    ] == r.facets.deletions
+
+
+@pytest.mark.anyio
+async def test_range_filters_are_shown_as_selected_and_data_is_filtered(
+    async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    cs = commit_search_cls(filters={"deletions": "better"})
+
+    r = await cs.execute()
+
+    assert 19 == r.hits.total.value  # type: ignore[attr-defined]
+
+
+@pytest.mark.anyio
+async def test_pagination(
+    async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch]
+) -> None:
+    cs = commit_search_cls()
+    cs = cs[0:20]
+
+    assert 52 == await cs.count()
+    assert 20 == len(await cs.execute())
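
The faceted search tests above declare facets on an AsyncFacetedSearch subclass and read
back (value, count, selected) tuples per facet, with filters= narrowing the hits while
marking the matching bucket as selected. A minimal sketch, reusing the test's index and
field names as placeholders:

    from elasticsearch.dsl.faceted_search import (
        AsyncFacetedSearch,
        RangeFacet,
        TermsFacet,
    )

    class CommitSearch(AsyncFacetedSearch):
        index = "flat-git"
        fields = ("description", "files")   # full-text fields the query runs against
        facets = {
            "files": TermsFacet(field="files"),
            "deletions": RangeFacet(
                field="stats.deletions",
                ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))],
            ),
        }

    async def run() -> None:
        # the filter narrows the result set and flags the chosen bucket as selected
        cs = CommitSearch(filters={"files": "test_elasticsearch_dsl"})
        r = await cs.execute()
        for value, count, selected in r.facets.files:
            print(value, count, selected)
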
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_index.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_index.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,162 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import (
+    AsyncComposableIndexTemplate,
+    AsyncDocument,
+    AsyncIndex,
+    AsyncIndexTemplate,
+    Date,
+    Text,
+    analysis,
+)
+
+
+class Post(AsyncDocument):
+    title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword"))
+    published_from = Date()
+
+
+@pytest.mark.anyio
+async def test_index_template_works(async_write_client: AsyncElasticsearch) -> None:
+    it = AsyncIndexTemplate("test-template", "test-legacy-*")
+    it.document(Post)
+    it.settings(number_of_replicas=0, number_of_shards=1)
+    await it.save()
+
+    i = AsyncIndex("test-legacy-blog")
+    await i.create()
+
+    assert {
+        "test-legacy-blog": {
+            "mappings": {
+                "properties": {
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                    "published_from": {"type": "date"},
+                }
+            }
+        }
+    } == await async_write_client.indices.get_mapping(index="test-legacy-blog")
+
+
+@pytest.mark.anyio
+async def test_composable_index_template_works(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    it = AsyncComposableIndexTemplate("test-template", "test-*")
+    it.document(Post)
+    it.settings(number_of_replicas=0, number_of_shards=1)
+    await it.save()
+
+    i = AsyncIndex("test-blog")
+    await i.create()
+
+    assert {
+        "test-blog": {
+            "mappings": {
+                "properties": {
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                    "published_from": {"type": "date"},
+                }
+            }
+        }
+    } == await async_write_client.indices.get_mapping(index="test-blog")
+
+
+@pytest.mark.anyio
+async def test_index_can_be_saved_even_with_settings(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    i = AsyncIndex("test-blog", using=async_write_client)
+    i.settings(number_of_shards=3, number_of_replicas=0)
+    await i.save()
+    i.settings(number_of_replicas=1)
+    await i.save()
+
+    assert (
+        "1"
+        == (await i.get_settings())["test-blog"]["settings"]["index"][
+            "number_of_replicas"
+        ]
+    )
+
+
+@pytest.mark.anyio
+async def test_index_exists(async_data_client: AsyncElasticsearch) -> None:
+    assert await AsyncIndex("git").exists()
+    assert not await AsyncIndex("not-there").exists()
+
+
+@pytest.mark.anyio
+async def test_index_can_be_created_with_settings_and_mappings(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    i = AsyncIndex("test-blog", using=async_write_client)
+    i.document(Post)
+    i.settings(number_of_replicas=0, number_of_shards=1)
+    await i.create()
+
+    assert {
+        "test-blog": {
+            "mappings": {
+                "properties": {
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                    "published_from": {"type": "date"},
+                }
+            }
+        }
+    } == await async_write_client.indices.get_mapping(index="test-blog")
+
+    settings = await async_write_client.indices.get_settings(index="test-blog")
+    assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0"
+    assert settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1"
+    assert settings["test-blog"]["settings"]["index"]["analysis"] == {
+        "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}
+    }
+
+
+@pytest.mark.anyio
+async def test_delete(async_write_client: AsyncElasticsearch) -> None:
+    await async_write_client.indices.create(
+        index="test-index",
+        body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}},
+    )
+
+    i = AsyncIndex("test-index", using=async_write_client)
+    await i.delete()
+    assert not await async_write_client.indices.exists(index="test-index")
+
+
+@pytest.mark.anyio
+async def test_multiple_indices_with_same_doc_type_work(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    i1 = AsyncIndex("test-index-1", using=async_write_client)
+    i2 = AsyncIndex("test-index-2", using=async_write_client)
+
+    for i in (i1, i2):
+        i.document(Post)
+        await i.create()
+
+    for j in ("test-index-1", "test-index-2"):
+        settings = await async_write_client.indices.get_settings(index=j)
+        assert settings[j]["settings"]["index"]["analysis"] == {
+            "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}
+        }
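
The index tests above cover composable index templates: a template carries the document
mapping and settings, and any newly created index matching its pattern inherits them. A
minimal sketch, with illustrative template and index names:

    from elasticsearch.dsl import (
        AsyncComposableIndexTemplate,
        AsyncDocument,
        AsyncIndex,
        Text,
    )

    class Post(AsyncDocument):
        title = Text()

    async def run() -> None:
        it = AsyncComposableIndexTemplate("blog-template", "blog-*")
        it.document(Post)                                    # attach the document mapping
        it.settings(number_of_shards=1, number_of_replicas=0)
        await it.save()                                      # store the template in the cluster

        index = AsyncIndex("blog-2025")                      # matches "blog-*", inherits the template
        await index.create()
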
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,171 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+from pytest import raises
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import AsyncMapping, analysis, exceptions
+
+
+@pytest.mark.anyio
+async def test_mapping_saved_into_es(async_write_client: AsyncElasticsearch) -> None:
+    m = AsyncMapping()
+    m.field(
+        "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")
+    )
+    m.field("tags", "keyword")
+    await m.save("test-mapping", using=async_write_client)
+
+    assert {
+        "test-mapping": {
+            "mappings": {
+                "properties": {
+                    "name": {"type": "text", "analyzer": "my_analyzer"},
+                    "tags": {"type": "keyword"},
+                }
+            }
+        }
+    } == await async_write_client.indices.get_mapping(index="test-mapping")
+
+
+@pytest.mark.anyio
+async def test_mapping_saved_into_es_when_index_already_exists_closed(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    m = AsyncMapping()
+    m.field(
+        "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")
+    )
+    await async_write_client.indices.create(index="test-mapping")
+
+    with raises(exceptions.IllegalOperation):
+        await m.save("test-mapping", using=async_write_client)
+
+    await async_write_client.cluster.health(
+        index="test-mapping", wait_for_status="yellow"
+    )
+    await async_write_client.indices.close(index="test-mapping")
+    await m.save("test-mapping", using=async_write_client)
+
+    assert {
+        "test-mapping": {
+            "mappings": {
+                "properties": {"name": {"type": "text", "analyzer": "my_analyzer"}}
+            }
+        }
+    } == await async_write_client.indices.get_mapping(index="test-mapping")
+
+
+@pytest.mark.anyio
+async def test_mapping_saved_into_es_when_index_already_exists_with_analysis(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    m = AsyncMapping()
+    analyzer = analysis.analyzer("my_analyzer", tokenizer="keyword")
+    m.field("name", "text", analyzer=analyzer)
+
+    new_analysis = analyzer.get_analysis_definition()
+    new_analysis["analyzer"]["other_analyzer"] = {
+        "type": "custom",
+        "tokenizer": "whitespace",
+    }
+    await async_write_client.indices.create(
+        index="test-mapping", body={"settings": {"analysis": new_analysis}}
+    )
+
+    m.field("title", "text", analyzer=analyzer)
+    await m.save("test-mapping", using=async_write_client)
+
+    assert {
+        "test-mapping": {
+            "mappings": {
+                "properties": {
+                    "name": {"type": "text", "analyzer": "my_analyzer"},
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                }
+            }
+        }
+    } == await async_write_client.indices.get_mapping(index="test-mapping")
+
+
+@pytest.mark.anyio
+async def test_mapping_gets_updated_from_es(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    await async_write_client.indices.create(
+        index="test-mapping",
+        body={
+            "settings": {"number_of_shards": 1, "number_of_replicas": 0},
+            "mappings": {
+                "date_detection": False,
+                "properties": {
+                    "title": {
+                        "type": "text",
+                        "analyzer": "snowball",
+                        "fields": {"raw": {"type": "keyword"}},
+                    },
+                    "created_at": {"type": "date"},
+                    "comments": {
+                        "type": "nested",
+                        "properties": {
+                            "created": {"type": "date"},
+                            "author": {
+                                "type": "text",
+                                "analyzer": "snowball",
+                                "fields": {"raw": {"type": "keyword"}},
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    )
+
+    m = await AsyncMapping.from_es("test-mapping", using=async_write_client)
+
+    assert ["comments", "created_at", "title"] == list(
+        sorted(m.properties.properties._d_.keys())  # type: ignore[attr-defined]
+    )
+    assert {
+        "date_detection": False,
+        "properties": {
+            "comments": {
+                "type": "nested",
+                "properties": {
+                    "created": {"type": "date"},
+                    "author": {
+                        "analyzer": "snowball",
+                        "fields": {"raw": {"type": "keyword"}},
+                        "type": "text",
+                    },
+                },
+            },
+            "created_at": {"type": "date"},
+            "title": {
+                "analyzer": "snowball",
+                "fields": {"raw": {"type": "keyword"}},
+                "type": "text",
+            },
+        },
+    } == m.to_dict()
+
+    # test same with alias
+    await async_write_client.indices.put_alias(index="test-mapping", name="test-alias")
+
+    m2 = await AsyncMapping.from_es("test-alias", using=async_write_client)
+    assert m2.to_dict() == m.to_dict()
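
The mapping tests above drive AsyncMapping both ways: pushing a locally defined mapping
(and any analyzers it needs) to an index, and rebuilding a Mapping object from what the
cluster reports. A minimal sketch, using an illustrative index name:

    from elasticsearch.dsl import AsyncMapping, analysis

    async def run(client) -> None:
        m = AsyncMapping()
        m.field(
            "name", "text",
            analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword"),
        )
        m.field("tags", "keyword")
        await m.save("articles", using=client)   # creates or updates the index mapping

        # pull the live mapping back from the cluster (an alias works here too)
        remote = await AsyncMapping.from_es("articles", using=client)
        print(remote.to_dict())
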
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_search.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,304 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+import pytest
+from pytest import raises
+
+from elasticsearch import ApiError, AsyncElasticsearch
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncMultiSearch,
+    AsyncSearch,
+    Date,
+    Keyword,
+    Q,
+    Text,
+)
+from elasticsearch.dsl.response import aggs
+
+from ..test_data import FLAT_DATA
+
+
+class Repository(AsyncDocument):
+    created_at = Date()
+    description = Text(analyzer="snowball")
+    tags = Keyword()
+
+    @classmethod
+    def search(cls) -> AsyncSearch["Repository"]:  # type: ignore[override]
+        return super().search().filter("term", commit_repo="repo")
+
+    class Index:
+        name = "git"
+
+
+class Commit(AsyncDocument):
+    class Index:
+        name = "flat-git"
+
+
+@pytest.mark.anyio
+async def test_filters_aggregation_buckets_are_accessible(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    has_tests_query = Q("term", files="test_elasticsearch_dsl")
+    s = Commit.search()[0:0]
+    s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket(
+        "has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query}
+    ).metric("lines", "stats", field="stats.lines")
+
+    response = await s.execute()
+
+    assert isinstance(
+        response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket
+    )
+    assert (
+        35
+        == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count
+    )
+    assert (
+        228
+        == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max
+    )
+
+
+@pytest.mark.anyio
+async def test_top_hits_are_wrapped_in_response(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = Commit.search()[0:0]
+    s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric(
+        "top_commits", "top_hits", size=5
+    )
+    response = await s.execute()
+
+    top_commits = response.aggregations.top_authors.buckets[0].top_commits
+    assert isinstance(top_commits, aggs.TopHitsData)
+    assert 5 == len(top_commits)
+
+    hits = [h for h in top_commits]
+    assert 5 == len(hits)
+    assert isinstance(hits[0], Commit)
+
+
+@pytest.mark.anyio
+async def test_inner_hits_are_wrapped_in_response(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = AsyncSearch(index="git")[0:1].query(
+        "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
+    )
+    response = await s.execute()
+
+    commit = response.hits[0]
+    assert isinstance(commit.meta.inner_hits.repo, response.__class__)
+    assert repr(commit.meta.inner_hits.repo[0]).startswith(
+        "<Hit(git/elasticsearch-dsl-py): "
+    )
+
+
+@pytest.mark.anyio
+async def test_inner_hits_are_serialized_to_dict(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = AsyncSearch(index="git")[0:1].query(
+        "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
+    )
+    response = await s.execute()
+    d = response.to_dict(recursive=True)
+    assert isinstance(d, dict)
+    assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict)
+
+    # iterating over the results changes the format of the internal AttrDict
+    for hit in response:
+        pass
+
+    d = response.to_dict(recursive=True)
+    assert isinstance(d, dict)
+    assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict)
+
+
+@pytest.mark.anyio
+async def test_scan_respects_doc_types(async_data_client: AsyncElasticsearch) -> None:
+    repos = [repo async for repo in Repository.search().scan()]
+
+    assert 1 == len(repos)
+    assert isinstance(repos[0], Repository)
+    assert repos[0].organization == "elasticsearch"
+
+
+@pytest.mark.anyio
+async def test_scan_iterates_through_all_docs(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = AsyncSearch(index="flat-git")
+
+    commits = [commit async for commit in s.scan()]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.anyio
+async def test_search_after(async_data_client: AsyncElasticsearch) -> None:
+    page_size = 7
+    s = AsyncSearch(index="flat-git")[:page_size].sort("authored_date")
+    commits = []
+    while True:
+        r = await s.execute()
+        commits += r.hits
+        if len(r.hits) < page_size:
+            break
+        s = s.search_after()
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.anyio
+async def test_search_after_no_search(async_data_client: AsyncElasticsearch) -> None:
+    s = AsyncSearch(index="flat-git")
+    with raises(
+        ValueError, match="A search must be executed before using search_after"
+    ):
+        s.search_after()
+    await s.count()
+    with raises(
+        ValueError, match="A search must be executed before using search_after"
+    ):
+        s.search_after()
+
+
+@pytest.mark.anyio
+async def test_search_after_no_sort(async_data_client: AsyncElasticsearch) -> None:
+    s = AsyncSearch(index="flat-git")
+    r = await s.execute()
+    with raises(
+        ValueError, match="Cannot use search_after when results are not sorted"
+    ):
+        r.search_after()
+
+
+@pytest.mark.anyio
+async def test_search_after_no_results(async_data_client: AsyncElasticsearch) -> None:
+    s = AsyncSearch(index="flat-git")[:100].sort("authored_date")
+    r = await s.execute()
+    assert 52 == len(r.hits)
+    s = s.search_after()
+    r = await s.execute()
+    assert 0 == len(r.hits)
+    with raises(
+        ValueError, match="Cannot use search_after when there are no search results"
+    ):
+        r.search_after()
+
+
+@pytest.mark.anyio
+async def test_point_in_time(async_data_client: AsyncElasticsearch) -> None:
+    page_size = 7
+    commits = []
+    async with AsyncSearch(index="flat-git")[:page_size].point_in_time(
+        keep_alive="30s"
+    ) as s:
+        pit_id = s._extra["pit"]["id"]
+        while True:
+            r = await s.execute()
+            commits += r.hits
+            if len(r.hits) < page_size:
+                break
+            s = s.search_after()
+            assert pit_id == s._extra["pit"]["id"]
+            assert "30s" == s._extra["pit"]["keep_alive"]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.anyio
+async def test_iterate(async_data_client: AsyncElasticsearch) -> None:
+    s = AsyncSearch(index="flat-git")
+
+    commits = [commit async for commit in s.iterate()]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.anyio
+async def test_response_is_cached(async_data_client: AsyncElasticsearch) -> None:
+    s = Repository.search()
+    repos = [repo async for repo in s]
+
+    assert hasattr(s, "_response")
+    assert s._response.hits == repos
+
+
+@pytest.mark.anyio
+async def test_multi_search(async_data_client: AsyncElasticsearch) -> None:
+    s1 = Repository.search()
+    s2 = AsyncSearch[Repository](index="flat-git")
+
+    ms = AsyncMultiSearch[Repository]()
+    ms = ms.add(s1).add(s2)
+
+    r1, r2 = await ms.execute()
+
+    assert 1 == len(r1)
+    assert isinstance(r1[0], Repository)
+    assert r1._search is s1
+
+    assert 52 == r2.hits.total.value  # type: ignore[attr-defined]
+    assert r2._search is s2
+
+
+@pytest.mark.anyio
+async def test_multi_missing(async_data_client: AsyncElasticsearch) -> None:
+    s1 = Repository.search()
+    s2 = AsyncSearch[Repository](index="flat-git")
+    s3 = AsyncSearch[Repository](index="does_not_exist")
+
+    ms = AsyncMultiSearch[Repository]()
+    ms = ms.add(s1).add(s2).add(s3)
+
+    with raises(ApiError):
+        await ms.execute()
+
+    r1, r2, r3 = await ms.execute(raise_on_error=False)
+
+    assert 1 == len(r1)
+    assert isinstance(r1[0], Repository)
+    assert r1._search is s1
+
+    assert 52 == r2.hits.total.value  # type: ignore[attr-defined]
+    assert r2._search is s2
+
+    assert r3 is None
+
+
+@pytest.mark.anyio
+async def test_raw_subfield_can_be_used_in_aggs(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = AsyncSearch(index="git")[0:0]
+    s.aggs.bucket("authors", "terms", field="author.name.raw", size=1)
+
+    r = await s.execute()
+
+    authors = r.aggregations.authors
+    assert 1 == len(authors)
+    assert {"key": "Honza Král", "doc_count": 52} == authors[0]
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,85 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import AsyncUpdateByQuery
+from elasticsearch.dsl.search import Q
+
+
+@pytest.mark.anyio
+async def test_update_by_query_no_script(
+    async_write_client: AsyncElasticsearch, setup_ubq_tests: str
+) -> None:
+    index = setup_ubq_tests
+
+    ubq = (
+        AsyncUpdateByQuery(using=async_write_client)
+        .index(index)
+        .filter(~Q("exists", field="is_public"))
+    )
+    response = await ubq.execute()
+
+    assert response.total == 52
+    assert response["took"] > 0
+    assert not response.timed_out
+    assert response.updated == 52
+    assert response.deleted == 0
+    assert response.took > 0
+    assert response.success()
+
+
+@pytest.mark.anyio
+async def test_update_by_query_with_script(
+    async_write_client: AsyncElasticsearch, setup_ubq_tests: str
+) -> None:
+    index = setup_ubq_tests
+
+    ubq = (
+        AsyncUpdateByQuery(using=async_write_client)
+        .index(index)
+        .filter(~Q("exists", field="parent_shas"))
+        .script(source="ctx._source.is_public = false")
+    )
+    ubq = ubq.params(conflicts="proceed")
+
+    response = await ubq.execute()
+    assert response.total == 2
+    assert response.updated == 2
+    assert response.version_conflicts == 0
+
+
+@pytest.mark.anyio
+async def test_delete_by_query_with_script(
+    async_write_client: AsyncElasticsearch, setup_ubq_tests: str
+) -> None:
+    index = setup_ubq_tests
+
+    ubq = (
+        AsyncUpdateByQuery(using=async_write_client)
+        .index(index)
+        .filter(Q("match", parent_shas="1dd19210b5be92b960f7db6f66ae526288edccc3"))
+        .script(source='ctx.op = "delete"')
+    )
+    ubq = ubq.params(conflicts="proceed")
+
+    response = await ubq.execute()
+
+    assert response.total == 1
+    assert response.deleted == 1
+    assert response.success()
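
The update-by-query tests above chain an index, a filter and an optional painless script
onto AsyncUpdateByQuery, then inspect counters on the response. A minimal sketch with a
placeholder index name:

    from elasticsearch.dsl import AsyncUpdateByQuery, Q

    async def run(client) -> None:
        ubq = (
            AsyncUpdateByQuery(using=client)
            .index("git")
            .filter(~Q("exists", field="is_public"))          # only docs missing the flag
            .script(source="ctx._source.is_public = false")   # painless update script
            .params(conflicts="proceed")                      # skip version conflicts
        )
        response = await ubq.execute()
        assert response.success()
        print(response.total, response.updated, response.deleted)
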
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,54 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import analyzer, token_filter, tokenizer
+
+
+@pytest.mark.sync
+def test_simulate_with_just_builtin_tokenizer(
+    client: Elasticsearch,
+) -> None:
+    a = analyzer("my-analyzer", tokenizer="keyword")
+    tokens = (a.simulate("Hello World!", using=client)).tokens
+
+    assert len(tokens) == 1
+    assert tokens[0].token == "Hello World!"
+
+
+@pytest.mark.sync
+def test_simulate_complex(client: Elasticsearch) -> None:
+    a = analyzer(
+        "my-analyzer",
+        tokenizer=tokenizer("split_words", "simple_pattern_split", pattern=":"),
+        filter=["lowercase", token_filter("no-ifs", "stop", stopwords=["if"])],
+    )
+
+    tokens = (a.simulate("if:this:works", using=client)).tokens
+
+    assert len(tokens) == 2
+    assert ["this", "works"] == [t.token for t in tokens]
+
+
+@pytest.mark.sync
+def test_simulate_builtin(client: Elasticsearch) -> None:
+    a = analyzer("my-analyzer", "english")
+    tokens = (a.simulate("fixes running")).tokens
+
+    assert ["fix", "run"] == [t.token for t in tokens]
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,918 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+# this file creates several documents using bad or no types because
+# these are still supported and should be kept functional in spite
+# of not having appropriate type hints. For that reason the comment
+# below disables many mypy checks that fail as a result of this.
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined"
+
+from datetime import datetime
+from ipaddress import ip_address
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Union
+
+import pytest
+from pytest import raises
+from pytz import timezone
+
+from elasticsearch import ConflictError, Elasticsearch, NotFoundError
+from elasticsearch.dsl import (
+    AttrDict,
+    Binary,
+    Boolean,
+    Date,
+    DenseVector,
+    Document,
+    Double,
+    InnerDoc,
+    Ip,
+    Keyword,
+    Long,
+    M,
+    Mapping,
+    MetaField,
+    Nested,
+    Object,
+    Q,
+    RankFeatures,
+    Search,
+    Text,
+    analyzer,
+    mapped_field,
+)
+from elasticsearch.dsl.query import Match
+from elasticsearch.dsl.types import MatchQuery
+from elasticsearch.dsl.utils import AttrList
+from elasticsearch.helpers.errors import BulkIndexError
+
+snowball = analyzer("my_snow", tokenizer="standard", filter=["lowercase", "snowball"])
+
+
+class User(InnerDoc):
+    name = Text(fields={"raw": Keyword()})
+
+
+class Wiki(Document):
+    owner = Object(User)
+    views = Long()
+    ranked = RankFeatures()
+
+    class Index:
+        name = "test-wiki"
+
+
+class Repository(Document):
+    owner = Object(User)
+    created_at = Date()
+    description = Text(analyzer=snowball)
+    tags = Keyword()
+
+    @classmethod
+    def search(cls) -> Search["Repository"]:  # type: ignore[override]
+        return super().search().filter("term", commit_repo="repo")
+
+    class Index:
+        name = "git"
+
+
+class Commit(Document):
+    committed_date = Date()
+    authored_date = Date()
+    description = Text(analyzer=snowball)
+
+    class Index:
+        name = "flat-git"
+
+    class Meta:
+        mapping = Mapping()
+
+
+class History(InnerDoc):
+    timestamp = Date()
+    diff = Text()
+
+
+class Comment(InnerDoc):
+    content = Text()
+    created_at = Date()
+    author = Object(User)
+    history = Nested(History)
+
+    class Meta:
+        dynamic = MetaField(False)
+
+
+class PullRequest(Document):
+    comments = Nested(Comment)
+    created_at = Date()
+
+    class Index:
+        name = "test-prs"
+
+
+class SerializationDoc(Document):
+    i = Long()
+    b = Boolean()
+    d = Double()
+    bin = Binary()
+    ip = Ip()
+
+    class Index:
+        name = "test-serialization"
+
+
+class Tags(Document):
+    tags = Keyword(multi=True)
+
+    class Index:
+        name = "tags"
+
+
+@pytest.mark.sync
+def test_serialization(write_client: Elasticsearch) -> None:
+    SerializationDoc.init()
+    write_client.index(
+        index="test-serialization",
+        id=42,
+        body={
+            "i": [1, 2, "3", None],
+            "b": [True, False, "true", "false", None],
+            "d": [0.1, "-0.1", None],
+            "bin": ["SGVsbG8gV29ybGQ=", None],
+            "ip": ["::1", "127.0.0.1", None],
+        },
+    )
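+    # values come back coerced to each field's Python type ("3" -> 3, "true" -> True, base64 -> bytes)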
+    sd = SerializationDoc.get(id=42)
+    assert sd is not None
+
+    assert sd.i == [1, 2, 3, None]
+    assert sd.b == [True, False, True, False, None]
+    assert sd.d == [0.1, -0.1, None]
+    assert sd.bin == [b"Hello World", None]
+    assert sd.ip == [ip_address("::1"), ip_address("127.0.0.1"), None]
+
+    assert sd.to_dict() == {
+        "b": [True, False, True, False, None],
+        "bin": ["SGVsbG8gV29ybGQ=", None],
+        "d": [0.1, -0.1, None],
+        "i": [1, 2, 3, None],
+        "ip": ["::1", "127.0.0.1", None],
+    }
+
+
+@pytest.mark.sync
+def test_nested_inner_hits_are_wrapped_properly(pull_request: Any) -> None:
+    history_query = Q(
+        "nested",
+        path="comments.history",
+        inner_hits={},
+        query=Q("match", comments__history__diff="ahoj"),
+    )
+    s = PullRequest.search().query(
+        "nested", inner_hits={}, path="comments", query=history_query
+    )
+
+    response = s.execute()
+    pr = response.hits[0]
+    assert isinstance(pr, PullRequest)
+    assert isinstance(pr.comments[0], Comment)
+    assert isinstance(pr.comments[0].history[0], History)
+
+    comment = pr.meta.inner_hits.comments.hits[0]
+    assert isinstance(comment, Comment)
+    assert comment.author.name == "honzakral"
+    assert isinstance(comment.history[0], History)
+
+    history = comment.meta.inner_hits["comments.history"].hits[0]
+    assert isinstance(history, History)
+    assert history.timestamp == datetime(2012, 1, 1)
+    assert "score" in history.meta
+
+
+@pytest.mark.sync
+def test_nested_inner_hits_are_deserialized_properly(
+    pull_request: Any,
+) -> None:
+    s = PullRequest.search().query(
+        "nested",
+        inner_hits={},
+        path="comments",
+        query=Q("match", comments__content="hello"),
+    )
+
+    response = s.execute()
+    pr = response.hits[0]
+    assert isinstance(pr.created_at, datetime)
+    assert isinstance(pr.comments[0], Comment)
+    assert isinstance(pr.comments[0].created_at, datetime)
+
+
+@pytest.mark.sync
+def test_nested_top_hits_are_wrapped_properly(pull_request: Any) -> None:
+    s = PullRequest.search()
+    s.aggs.bucket("comments", "nested", path="comments").metric(
+        "hits", "top_hits", size=1
+    )
+
+    r = s.execute()
+
+    print(r._d_)
+    assert isinstance(r.aggregations.comments.hits.hits[0], Comment)
+
+
+@pytest.mark.sync
+def test_update_object_field(write_client: Elasticsearch) -> None:
+    Wiki.init()
+    w = Wiki(
+        owner=User(name="Honza Kral"),
+        _id="elasticsearch-py",
+        ranked={"test1": 0.1, "topic2": 0.2},
+    )
+    w.save()
+
+    assert "updated" == w.update(owner=[{"name": "Honza"}, User(name="Nick")])
+    assert w.owner[0].name == "Honza"
+    assert w.owner[1].name == "Nick"
+
+    w = Wiki.get(id="elasticsearch-py")
+    assert w.owner[0].name == "Honza"
+    assert w.owner[1].name == "Nick"
+
+    assert w.ranked == {"test1": 0.1, "topic2": 0.2}
+
+
+@pytest.mark.sync
+def test_update_script(write_client: Elasticsearch) -> None:
+    Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    w.save()
+
+    w.update(script="ctx._source.views += params.inc", inc=5)
+    w = Wiki.get(id="elasticsearch-py")
+    assert w.views == 47
+
+
+@pytest.mark.sync
+def test_update_script_with_dict(write_client: Elasticsearch) -> None:
+    Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    w.save()
+
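+    # params supplied inside the script dict ("inc1") and as keyword arguments ("inc2") are merged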
+    w.update(
+        script={
+            "source": "ctx._source.views += params.inc1 + params.inc2",
+            "params": {"inc1": 2},
+            "lang": "painless",
+        },
+        inc2=3,
+    )
+    w = Wiki.get(id="elasticsearch-py")
+    assert w.views == 47
+
+
+@pytest.mark.sync
+def test_update_retry_on_conflict(write_client: Elasticsearch) -> None:
+    Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    w.save()
+
+    w1 = Wiki.get(id="elasticsearch-py")
+    w2 = Wiki.get(id="elasticsearch-py")
+    assert w1 is not None
+    assert w2 is not None
+
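+    # with retry_on_conflict=1, the second update retries after the version conflict, so both increments apply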
+    w1.update(script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1)
+    w2.update(script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1)
+
+    w = Wiki.get(id="elasticsearch-py")
+    assert w.views == 52
+
+
+@pytest.mark.sync
+@pytest.mark.parametrize("retry_on_conflict", [None, 0])
+def test_update_conflicting_version(
+    write_client: Elasticsearch, retry_on_conflict: Optional[int]
+) -> None:
+    Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    w.save()
+
+    w1 = Wiki.get(id="elasticsearch-py")
+    w2 = Wiki.get(id="elasticsearch-py")
+    assert w1 is not None
+    assert w2 is not None
+
+    w1.update(script="ctx._source.views += params.inc", inc=5)
+
+    with raises(ConflictError):
+        w2.update(
+            script="ctx._source.views += params.inc",
+            inc=5,
+            retry_on_conflict=retry_on_conflict,
+        )
+
+
+@pytest.mark.sync
+def test_save_and_update_return_doc_meta(
+    write_client: Elasticsearch,
+) -> None:
+    Wiki.init()
+    w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42)
+    resp = w.save(return_doc_meta=True)
+    assert resp["_index"] == "test-wiki"
+    assert resp["result"] == "created"
+    assert set(resp.keys()) == {
+        "_id",
+        "_index",
+        "_primary_term",
+        "_seq_no",
+        "_shards",
+        "_version",
+        "result",
+    }
+
+    resp = w.update(
+        script="ctx._source.views += params.inc", inc=5, return_doc_meta=True
+    )
+    assert resp["_index"] == "test-wiki"
+    assert resp["result"] == "updated"
+    assert set(resp.keys()) == {
+        "_id",
+        "_index",
+        "_primary_term",
+        "_seq_no",
+        "_shards",
+        "_version",
+        "result",
+    }
+
+
+@pytest.mark.sync
+def test_init(write_client: Elasticsearch) -> None:
+    Repository.init(index="test-git")
+
+    assert write_client.indices.exists(index="test-git")
+
+
+@pytest.mark.sync
+def test_get_raises_404_on_index_missing(
+    data_client: Elasticsearch,
+) -> None:
+    with raises(NotFoundError):
+        Repository.get("elasticsearch-dsl-php", index="not-there")
+
+
+@pytest.mark.sync
+def test_get_raises_404_on_non_existent_id(
+    data_client: Elasticsearch,
+) -> None:
+    with raises(NotFoundError):
+        Repository.get("elasticsearch-dsl-php")
+
+
+@pytest.mark.sync
+def test_get_returns_none_if_404_ignored(
+    data_client: Elasticsearch,
+) -> None:
+    assert None is Repository.get(
+        "elasticsearch-dsl-php", using=data_client.options(ignore_status=404)
+    )
+
+
+@pytest.mark.sync
+def test_get_returns_none_if_404_ignored_and_index_doesnt_exist(
+    data_client: Elasticsearch,
+) -> None:
+    assert None is Repository.get(
+        "42", index="not-there", using=data_client.options(ignore_status=404)
+    )
+
+
+@pytest.mark.sync
+def test_get(data_client: Elasticsearch) -> None:
+    elasticsearch_repo = Repository.get("elasticsearch-dsl-py")
+
+    assert isinstance(elasticsearch_repo, Repository)
+    assert elasticsearch_repo.owner.name == "elasticsearch"
+    assert datetime(2014, 3, 3) == elasticsearch_repo.created_at
+
+
+@pytest.mark.sync
+def test_exists_return_true(data_client: Elasticsearch) -> None:
+    assert Repository.exists("elasticsearch-dsl-py")
+
+
+@pytest.mark.sync
+def test_exists_false(data_client: Elasticsearch) -> None:
+    assert not Repository.exists("elasticsearch-dsl-php")
+
+
+@pytest.mark.sync
+def test_get_with_tz_date(data_client: Elasticsearch) -> None:
+    first_commit = Commit.get(
+        id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py"
+    )
+    assert first_commit is not None
+
+    tzinfo = timezone("Europe/Prague")
+    assert (
+        tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123000))
+        == first_commit.authored_date
+    )
+
+
+@pytest.mark.sync
+def test_save_with_tz_date(data_client: Elasticsearch) -> None:
+    tzinfo = timezone("Europe/Prague")
+    first_commit = Commit.get(
+        id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py"
+    )
+    assert first_commit is not None
+
+    first_commit.committed_date = tzinfo.localize(
+        datetime(2014, 5, 2, 13, 47, 19, 123456)
+    )
+    first_commit.save()
+
+    first_commit = Commit.get(
+        id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py"
+    )
+    assert first_commit is not None
+
+    assert (
+        tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123456))
+        == first_commit.committed_date
+    )
+
+
+COMMIT_DOCS_WITH_MISSING = [
+    {"_id": "0"},  # Missing
+    {"_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"},  # Existing
+    {"_id": "f"},  # Missing
+    {"_id": "eb3e543323f189fd7b698e66295427204fff5755"},  # Existing
+]
+
+
+@pytest.mark.sync
+def test_mget(data_client: Elasticsearch) -> None:
+    commits = Commit.mget(COMMIT_DOCS_WITH_MISSING)
+    assert commits[0] is None
+    assert commits[1] is not None
+    assert commits[1].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"
+    assert commits[2] is None
+    assert commits[3] is not None
+    assert commits[3].meta.id == "eb3e543323f189fd7b698e66295427204fff5755"
+
+
+@pytest.mark.sync
+def test_mget_raises_exception_when_missing_param_is_invalid(
+    data_client: Elasticsearch,
+) -> None:
+    with raises(ValueError):
+        Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raj")
+
+
+@pytest.mark.sync
+def test_mget_raises_404_when_missing_param_is_raise(
+    data_client: Elasticsearch,
+) -> None:
+    with raises(NotFoundError):
+        Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raise")
+
+
+@pytest.mark.sync
+def test_mget_ignores_missing_docs_when_missing_param_is_skip(
+    data_client: Elasticsearch,
+) -> None:
+    commits = Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="skip")
+    assert commits[0] is not None
+    assert commits[0].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"
+    assert commits[1] is not None
+    assert commits[1].meta.id == "eb3e543323f189fd7b698e66295427204fff5755"
+
+
+@pytest.mark.sync
+def test_update_works_from_search_response(
+    data_client: Elasticsearch,
+) -> None:
+    elasticsearch_repo = (Repository.search().execute())[0]
+
+    elasticsearch_repo.update(owner={"other_name": "elastic"})
+    assert "elastic" == elasticsearch_repo.owner.other_name
+
+    new_version = Repository.get("elasticsearch-dsl-py")
+    assert new_version is not None
+    assert "elastic" == new_version.owner.other_name
+    assert "elasticsearch" == new_version.owner.name
+
+
+@pytest.mark.sync
+def test_update(data_client: Elasticsearch) -> None:
+    elasticsearch_repo = Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+    v = elasticsearch_repo.meta.version
+
+    old_seq_no = elasticsearch_repo.meta.seq_no
+    elasticsearch_repo.update(owner={"new_name": "elastic"}, new_field="testing-update")
+
+    assert "elastic" == elasticsearch_repo.owner.new_name
+    assert "testing-update" == elasticsearch_repo.new_field
+
+    # assert version has been updated
+    assert elasticsearch_repo.meta.version == v + 1
+
+    new_version = Repository.get("elasticsearch-dsl-py")
+    assert new_version is not None
+    assert "testing-update" == new_version.new_field
+    assert "elastic" == new_version.owner.new_name
+    assert "elasticsearch" == new_version.owner.name
+    assert "seq_no" in new_version.meta
+    assert new_version.meta.seq_no != old_seq_no
+    assert "primary_term" in new_version.meta
+
+
+@pytest.mark.sync
+def test_save_updates_existing_doc(data_client: Elasticsearch) -> None:
+    elasticsearch_repo = Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+
+    elasticsearch_repo.new_field = "testing-save"
+    old_seq_no = elasticsearch_repo.meta.seq_no
+    assert "updated" == elasticsearch_repo.save()
+
+    new_repo = data_client.get(index="git", id="elasticsearch-dsl-py")
+    assert "testing-save" == new_repo["_source"]["new_field"]
+    assert new_repo["_seq_no"] != old_seq_no
+    assert new_repo["_seq_no"] == elasticsearch_repo.meta.seq_no
+
+
+@pytest.mark.sync
+def test_update_empty_field(client: Elasticsearch) -> None:
+    Tags._index.delete(ignore_unavailable=True)
+    Tags.init()
+    d = Tags(id="123", tags=["a", "b"])
+    d.save(refresh=True)
+    d.update(tags=[], refresh=True)
+    assert d.tags == []
+
+    r = Tags.search().execute()
+    assert r.hits[0].tags == []
+
+
+@pytest.mark.sync
+def test_save_automatically_uses_seq_no_and_primary_term(
+    data_client: Elasticsearch,
+) -> None:
+    elasticsearch_repo = Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+    elasticsearch_repo.meta.seq_no += 1
+
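+    # the local seq_no no longer matches the stored one, so the concurrency-checked save is rejected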
+    with raises(ConflictError):
+        elasticsearch_repo.save()
+
+
+@pytest.mark.sync
+def test_delete_automatically_uses_seq_no_and_primary_term(
+    data_client: Elasticsearch,
+) -> None:
+    elasticsearch_repo = Repository.get("elasticsearch-dsl-py")
+    assert elasticsearch_repo is not None
+    elasticsearch_repo.meta.seq_no += 1
+
+    with raises(ConflictError):
+        elasticsearch_repo.delete()
+
+
+def assert_doc_equals(expected: Any, actual: Any) -> None:
+    for f in expected:
+        assert f in actual
+        assert actual[f] == expected[f]
+
+
+@pytest.mark.sync
+def test_can_save_to_different_index(
+    write_client: Elasticsearch,
+) -> None:
+    test_repo = Repository(description="testing", meta={"id": 42})
+    assert test_repo.save(index="test-document")
+
+    assert_doc_equals(
+        {
+            "found": True,
+            "_index": "test-document",
+            "_id": "42",
+            "_source": {"description": "testing"},
+        },
+        write_client.get(index="test-document", id=42),
+    )
+
+
+@pytest.mark.sync
+@pytest.mark.parametrize("validate", (True, False))
+def test_save_without_skip_empty_will_include_empty_fields(
+    write_client: Elasticsearch,
+    validate: bool,
+) -> None:
+    test_repo = Repository(
+        field_1=[], field_2=None, field_3={}, owner={"name": None}, meta={"id": 42}
+    )
+    assert test_repo.save(index="test-document", skip_empty=False, validate=validate)
+
+    assert_doc_equals(
+        {
+            "found": True,
+            "_index": "test-document",
+            "_id": "42",
+            "_source": {
+                "field_1": [],
+                "field_2": None,
+                "field_3": {},
+                "owner": {"name": None},
+            },
+        },
+        write_client.get(index="test-document", id=42),
+    )
+
+    test_repo = Repository(owner=AttrDict({"name": None}), meta={"id": 43})
+    assert test_repo.save(index="test-document", skip_empty=False, validate=validate)
+
+    assert_doc_equals(
+        {
+            "found": True,
+            "_index": "test-document",
+            "_id": "43",
+            "_source": {
+                "owner": {"name": None},
+            },
+        },
+        write_client.get(index="test-document", id=43),
+    )
+
+
+@pytest.mark.sync
+def test_delete(write_client: Elasticsearch) -> None:
+    write_client.create(
+        index="test-document",
+        id="elasticsearch-dsl-py",
+        body={
+            "organization": "elasticsearch",
+            "created_at": "2014-03-03",
+            "owner": {"name": "elasticsearch"},
+        },
+    )
+
+    test_repo = Repository(meta={"id": "elasticsearch-dsl-py"})
+    test_repo.meta.index = "test-document"
+    test_repo.delete()
+
+    assert not write_client.exists(
+        index="test-document",
+        id="elasticsearch-dsl-py",
+    )
+
+
+@pytest.mark.sync
+def test_search(data_client: Elasticsearch) -> None:
+    assert Repository.search().count() == 1
+
+
+@pytest.mark.sync
+def test_search_returns_proper_doc_classes(
+    data_client: Elasticsearch,
+) -> None:
+    result = Repository.search().execute()
+
+    elasticsearch_repo = result.hits[0]
+
+    assert isinstance(elasticsearch_repo, Repository)
+    assert elasticsearch_repo.owner.name == "elasticsearch"
+
+
+@pytest.mark.sync
+def test_refresh_mapping(data_client: Elasticsearch) -> None:
+    class Commit(Document):
+        class Index:
+            name = "git"
+
+    Commit._index.load_mappings()
+
+    assert "stats" in Commit._index._mapping
+    assert "committer" in Commit._index._mapping
+    assert "description" in Commit._index._mapping
+    assert "committed_date" in Commit._index._mapping
+    assert isinstance(Commit._index._mapping["committed_date"], Date)
+
+
+@pytest.mark.sync
+def test_highlight_in_meta(data_client: Elasticsearch) -> None:
+    commit = (
+        Commit.search()
+        .query("match", description="inverting")
+        .highlight("description")
+        .execute()
+    )[0]
+
+    assert isinstance(commit, Commit)
+    assert "description" in commit.meta.highlight
+    assert isinstance(commit.meta.highlight["description"], AttrList)
+    assert len(commit.meta.highlight["description"]) > 0
+
+
+@pytest.mark.sync
+def test_bulk(data_client: Elasticsearch) -> None:
+    class Address(InnerDoc):
+        street: str
+        active: bool
+
+    class Doc(Document):
+        if TYPE_CHECKING:
+            _id: int
+        name: str
+        age: int
+        languages: List[str] = mapped_field(Keyword())
+        addresses: List[Address]
+
+        class Index:
+            name = "bulk-index"
+
+    Doc._index.delete(ignore_unavailable=True)
+    Doc.init()
+
+    def gen1() -> Iterator[Union[Doc, Dict[str, Any]]]:
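+        # a mix of Document instances and raw bulk action dicts can be yielded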
+        yield Doc(
+            name="Joe",
+            age=33,
+            languages=["en", "fr"],
+            addresses=[
+                Address(street="123 Main St", active=True),
+                Address(street="321 Park Dr.", active=False),
+            ],
+        )
+        yield Doc(name="Susan", age=20, languages=["en"])
+        yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)}
+
+    Doc.bulk(gen1(), refresh=True)
+    docs = list(Doc.search().execute())
+    assert len(docs) == 3
+    assert docs[0].to_dict() == {
+        "name": "Joe",
+        "age": 33,
+        "languages": [
+            "en",
+            "fr",
+        ],
+        "addresses": [
+            {
+                "active": True,
+                "street": "123 Main St",
+            },
+            {
+                "active": False,
+                "street": "321 Park Dr.",
+            },
+        ],
+    }
+    assert docs[1].to_dict() == {
+        "name": "Susan",
+        "age": 20,
+        "languages": ["en"],
+    }
+    assert docs[2].to_dict() == {
+        "name": "Sarah",
+        "age": 45,
+    }
+    assert docs[2].meta.id == "45"
+
+    def gen2() -> Iterator[Union[Doc, Dict[str, Any]]]:
+        yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)}
+
+    # a "create" action with an existing id should fail
+    with raises(BulkIndexError):
+        Doc.bulk(gen2(), refresh=True)
+
+    def gen3() -> Iterator[Union[Doc, Dict[str, Any]]]:
+        yield Doc(_id="45", name="Sarah", age=45, languages=["es"])
+        yield {"_op_type": "delete", "_id": docs[1].meta.id}
+
+    Doc.bulk(gen3(), refresh=True)
+    with raises(NotFoundError):
+        Doc.get(docs[1].meta.id)
+    doc = Doc.get("45")
+    assert doc is not None
+    assert doc.to_dict() == {
+        "name": "Sarah",
+        "age": 45,
+        "languages": ["es"],
+    }
+
+
+@pytest.mark.sync
+def test_legacy_dense_vector(
+    client: Elasticsearch, es_version: Tuple[int, ...]
+) -> None:
+    if es_version >= (8, 16):
+        pytest.skip("this test is a legacy version for Elasticsearch 8.15 or older")
+
+    class Doc(Document):
+        float_vector: List[float] = mapped_field(DenseVector(dims=3))
+
+        class Index:
+            name = "vectors"
+
+    Doc._index.delete(ignore_unavailable=True)
+    Doc.init()
+
+    doc = Doc(float_vector=[1.0, 1.2, 2.3])
+    doc.save(refresh=True)
+
+    docs = Doc.search().execute()
+    assert len(docs) == 1
+    assert docs[0].float_vector == doc.float_vector
+
+
+@pytest.mark.sync
+def test_dense_vector(client: Elasticsearch, es_version: Tuple[int, ...]) -> None:
+    if es_version < (8, 16):
+        pytest.skip("this test requires Elasticsearch 8.16 or newer")
+
+    class Doc(Document):
+        float_vector: List[float] = mapped_field(DenseVector())
+        byte_vector: List[int] = mapped_field(DenseVector(element_type="byte"))
+        bit_vector: List[int] = mapped_field(DenseVector(element_type="bit"))
+
+        class Index:
+            name = "vectors"
+
+    Doc._index.delete(ignore_unavailable=True)
+    Doc.init()
+
+    doc = Doc(
+        float_vector=[1.0, 1.2, 2.3],
+        byte_vector=[12, 23, 34, 45],
+        bit_vector=[18, -43, -112],
+    )
+    doc.save(refresh=True)
+
+    docs = Doc.search().execute()
+    assert len(docs) == 1
+    assert [round(v, 1) for v in docs[0].float_vector] == doc.float_vector
+    assert docs[0].byte_vector == doc.byte_vector
+    assert docs[0].bit_vector == doc.bit_vector
+
+
+@pytest.mark.sync
+def test_copy_to(client: Elasticsearch) -> None:
+    class Person(Document):
+        first_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"]))
+        last_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"]))
+        birth_place: M[str] = mapped_field(Text(copy_to="all"))
+        full_name: M[Optional[str]] = mapped_field(init=False)
+        all: M[Optional[str]] = mapped_field(init=False)
+
+        class Index:
+            name = "people"
+
+    Person._index.delete(ignore_unavailable=True)
+    Person.init()
+
+    person = Person(first_name="Jane", last_name="Doe", birth_place="Springfield")
+    person.save()
+    Person._index.refresh()
+
+    match = (
+        Person.search()
+        .query(Match(Person.full_name, MatchQuery(query="Jane")))
+        .execute()
+    )
+    assert len(match) == 1
+
+    match = Person.search().query(Match(Person.all, MatchQuery(query="Doe"))).execute()
+    assert len(match) == 1
+
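+    # birth_place is copied only to "all", so it is not searchable through full_name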
+    match = (
+        Person.search()
+        .query(Match(Person.full_name, MatchQuery(query="Springfield")))
+        .execute()
+    )
+    assert len(match) == 0
+
+    match = (
+        Person.search()
+        .query(Match(Person.all, MatchQuery(query="Springfield")))
+        .execute()
+    )
+    assert len(match) == 1
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,254 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch.dsl import Document, InnerDoc, M
+from elasticsearch.esql import ESQL, E, functions
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+
+
+class Employee(Document):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+
+def load_db():
+    data = [
+        [
+            10000,
+            "Joseph",
+            "Wall",
+            2.2,
+            True,
+            Address(address="8875 Long Shoals Suite 441", city="Marcville, TX"),
+        ],
+        [
+            10001,
+            "Stephanie",
+            "Ward",
+            1.749,
+            True,
+            Address(address="90162 Carter Harbor Suite 099", city="Davisborough, DE"),
+        ],
+        [
+            10002,
+            "David",
+            "Keller",
+            1.872,
+            True,
+            Address(address="6697 Patrick Union Suite 797", city="Fuentesmouth, SD"),
+        ],
+        [
+            10003,
+            "Roger",
+            "Hinton",
+            1.694,
+            False,
+            Address(address="809 Kelly Mountains", city="South Megan, DE"),
+        ],
+        [
+            10004,
+            "Joshua",
+            "Garcia",
+            1.661,
+            False,
+            Address(address="718 Angela Forks", city="Port Erinland, MA"),
+        ],
+        [
+            10005,
+            "Matthew",
+            "Richards",
+            1.633,
+            False,
+            Address(address="2869 Brown Mountains", city="New Debra, NH"),
+        ],
+        [
+            10006,
+            "Maria",
+            "Luna",
+            1.893,
+            True,
+            Address(address="5861 Morgan Springs", city="Lake Daniel, WI"),
+        ],
+        [
+            10007,
+            "Angela",
+            "Navarro",
+            1.604,
+            False,
+            Address(address="2848 Allen Station", city="Saint Joseph, OR"),
+        ],
+        [
+            10008,
+            "Maria",
+            "Cannon",
+            2.079,
+            False,
+            Address(address="322 NW Johnston", city="Bakerburgh, MP"),
+        ],
+        [
+            10009,
+            "Joseph",
+            "Sutton",
+            2.025,
+            True,
+            Address(address="77 Cardinal E", city="Lakestown, IL"),
+        ],
+    ]
+    if Employee._index.exists():
+        Employee._index.delete()
+    Employee.init()
+
+    for e in data:
+        employee = Employee(
+            emp_no=e[0],
+            first_name=e[1],
+            last_name=e[2],
+            height=e[3],
+            still_hired=e[4],
+            address=e[5],
+        )
+        employee.save()
+    Employee._index.refresh()
+
+
+@pytest.mark.sync
+def test_esql(client):
+    load_db()
+
+    # get the full names of the employees
+    query = (
+        ESQL.from_(Employee)
+        .eval(full_name=functions.concat(Employee.first_name, " ", Employee.last_name))
+        .keep("full_name")
+        .sort("full_name")
+        .limit(10)
+    )
+    r = client.esql.query(query=str(query))
+    assert r.body["values"] == [
+        ["Angela Navarro"],
+        ["David Keller"],
+        ["Joseph Sutton"],
+        ["Joseph Wall"],
+        ["Joshua Garcia"],
+        ["Maria Cannon"],
+        ["Maria Luna"],
+        ["Matthew Richards"],
+        ["Roger Hinton"],
+        ["Stephanie Ward"],
+    ]
+
+    # get the average height of all hired employees
+    query = ESQL.from_(Employee).stats(
+        avg_height=functions.round(functions.avg(Employee.height), 2).where(
+            Employee.still_hired == True  # noqa: E712
+        )
+    )
+    r = client.esql.query(query=str(query))
+    assert r.body["values"] == [[1.95]]
+
+    # find employees by name using a parameter
+    query = (
+        ESQL.from_(Employee)
+        .where(Employee.first_name == E("?"))
+        .keep(Employee.last_name)
+        .sort(Employee.last_name.desc())
+    )
+    r = client.esql.query(query=str(query), params=["Maria"])
+    assert r.body["values"] == [["Luna"], ["Cannon"]]
+
+
+@pytest.mark.sync
+def test_esql_dsl(client):
+    load_db()
+
+    # get employees with first name "Maria"
+    query = (
+        Employee.esql_from()
+        .where(Employee.first_name == "Maria")
+        .sort("last_name")
+        .limit(10)
+    )
+    marias = []
+    for emp in Employee.esql_execute(query):
+        marias.append(emp)
+    assert len(marias) == 2
+    assert marias[0].last_name == "Cannon"
+    assert marias[0].address.address == "322 NW Johnston"
+    assert marias[0].address.city == "Bakerburgh, MP"
+    assert marias[1].last_name == "Luna"
+    assert marias[1].address.address == "5861 Morgan Springs"
+    assert marias[1].address.city == "Lake Daniel, WI"
+
+    # run a query with a missing field
+    query = (
+        Employee.esql_from()
+        .where(Employee.first_name == "Maria")
+        .drop(Employee.address.city)
+        .sort("last_name")
+        .limit(10)
+    )
+    with pytest.raises(ValueError):
+        Employee.esql_execute(query).__next__()
+    marias = []
+    for emp in Employee.esql_execute(query, ignore_missing_fields=True):
+        marias.append(emp)
+    assert marias[0].last_name == "Cannon"
+    assert marias[0].address.address == "322 NW Johnston"
+    assert marias[0].address.city is None
+    assert marias[1].last_name == "Luna"
+    assert marias[1].address.address == "5861 Morgan Springs"
+    assert marias[1].address.city is None
+
+    # run a query with additional calculated fields
+    query = (
+        Employee.esql_from()
+        .where(Employee.first_name == "Maria")
+        .eval(
+            full_name=functions.concat(Employee.first_name, " ", Employee.last_name),
+            height_cm=functions.to_integer(Employee.height * 100),
+        )
+        .sort("last_name")
+        .limit(10)
+    )
+    assert isinstance(Employee.esql_execute(query).__next__(), Employee)
+    assert isinstance(
+        Employee.esql_execute(query, return_additional=True).__next__(), tuple
+    )
+    marias = []
+    for emp, extra in Employee.esql_execute(query, return_additional=True):
+        marias.append([emp, extra])
+    assert marias[0][0].last_name == "Cannon"
+    assert marias[0][0].address.address == "322 NW Johnston"
+    assert marias[0][0].address.city == "Bakerburgh, MP"
+    assert marias[0][1] == {"full_name": "Maria Cannon", "height_cm": 208}
+    assert marias[1][0].last_name == "Luna"
+    assert marias[1][0].address.address == "5861 Morgan Springs"
+    assert marias[1][0].address.city == "Lake Daniel, WI"
+    assert marias[1][1] == {"full_name": "Maria Luna", "height_cm": 189}
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,305 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+from typing import Tuple, Type
+
+import pytest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import A, Boolean, Date, Document, Keyword, Search
+from elasticsearch.dsl.faceted_search import (
+    DateHistogramFacet,
+    FacetedSearch,
+    NestedFacet,
+    RangeFacet,
+    TermsFacet,
+)
+
+from .test_document import PullRequest
+
+
+class Repos(Document):
+    is_public = Boolean()
+    created_at = Date()
+
+    class Index:
+        name = "git"
+
+
+class Commit(Document):
+    files = Keyword()
+    committed_date = Date()
+
+    class Index:
+        name = "git"
+
+
+class MetricSearch(FacetedSearch):
+    index = "git"
+    doc_types = [Commit]
+
+    facets = {
+        "files": TermsFacet(field="files", metric=A("max", field="committed_date")),
+    }
+
+
+@pytest.fixture
+def commit_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]:
+    if es_version >= (7, 2):
+        interval_kwargs = {"fixed_interval": "1d"}
+    else:
+        interval_kwargs = {"interval": "day"}
+
+    class CommitSearch(FacetedSearch):
+        index = "flat-git"
+        fields = (
+            "description",
+            "files",
+        )
+
+        facets = {
+            "files": TermsFacet(field="files"),
+            "frequency": DateHistogramFacet(
+                field="authored_date", min_doc_count=1, **interval_kwargs
+            ),
+            "deletions": RangeFacet(
+                field="stats.deletions",
+                ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))],
+            ),
+        }
+
+    return CommitSearch
+
+
+@pytest.fixture
+def repo_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]:
+    interval_type = "calendar_interval" if es_version >= (7, 2) else "interval"
+
+    class RepoSearch(FacetedSearch):
+        index = "git"
+        doc_types = [Repos]
+        facets = {
+            "public": TermsFacet(field="is_public"),
+            "created": DateHistogramFacet(
+                field="created_at", **{interval_type: "month"}
+            ),
+        }
+
+        def search(self) -> Search:
+            s = super().search()
+            return s.filter("term", commit_repo="repo")
+
+    return RepoSearch
+
+
+@pytest.fixture
+def pr_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]:
+    interval_type = "calendar_interval" if es_version >= (7, 2) else "interval"
+
+    class PRSearch(FacetedSearch):
+        index = "test-prs"
+        doc_types = [PullRequest]
+        facets = {
+            "comments": NestedFacet(
+                "comments",
+                DateHistogramFacet(
+                    field="comments.created_at", **{interval_type: "month"}
+                ),
+            )
+        }
+
+    return PRSearch
+
+
+@pytest.mark.sync
+def test_facet_with_custom_metric(data_client: Elasticsearch) -> None:
+    ms = MetricSearch()
+    r = ms.execute()
+
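+    # with a custom metric, buckets are ordered by that metric in descending order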
+    dates = [f[1] for f in r.facets.files]
+    assert dates == list(sorted(dates, reverse=True))
+    assert dates[0] == 1399038439000
+
+
+@pytest.mark.sync
+def test_nested_facet(
+    pull_request: PullRequest, pr_search_cls: Type[FacetedSearch]
+) -> None:
+    prs = pr_search_cls()
+    r = prs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(datetime(2018, 1, 1, 0, 0), 1, False)] == r.facets.comments
+
+
+@pytest.mark.sync
+def test_nested_facet_with_filter(
+    pull_request: PullRequest, pr_search_cls: Type[FacetedSearch]
+) -> None:
+    prs = pr_search_cls(filters={"comments": datetime(2018, 1, 1, 0, 0)})
+    r = prs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(datetime(2018, 1, 1, 0, 0), 1, True)] == r.facets.comments
+
+    prs = pr_search_cls(filters={"comments": datetime(2018, 2, 1, 0, 0)})
+    r = prs.execute()
+    assert not r.hits
+
+
+@pytest.mark.sync
+def test_datehistogram_facet(
+    data_client: Elasticsearch, repo_search_cls: Type[FacetedSearch]
+) -> None:
+    rs = repo_search_cls()
+    r = rs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(datetime(2014, 3, 1, 0, 0), 1, False)] == r.facets.created
+
+
+@pytest.mark.sync
+def test_boolean_facet(
+    data_client: Elasticsearch, repo_search_cls: Type[FacetedSearch]
+) -> None:
+    rs = repo_search_cls()
+    r = rs.execute()
+
+    assert r.hits.total.value == 1  # type: ignore[attr-defined]
+    assert [(True, 1, False)] == r.facets.public
+    value, count, selected = r.facets.public[0]
+    assert value is True
+
+
+@pytest.mark.sync
+def test_empty_search_finds_everything(
+    data_client: Elasticsearch,
+    es_version: Tuple[int, ...],
+    commit_search_cls: Type[FacetedSearch],
+) -> None:
+    cs = commit_search_cls()
+    r = cs.execute()
+
+    assert r.hits.total.value == 52  # type: ignore[attr-defined]
+    assert [
+        ("elasticsearch_dsl", 40, False),
+        ("test_elasticsearch_dsl", 35, False),
+        ("elasticsearch_dsl/query.py", 19, False),
+        ("test_elasticsearch_dsl/test_search.py", 15, False),
+        ("elasticsearch_dsl/utils.py", 14, False),
+        ("test_elasticsearch_dsl/test_query.py", 13, False),
+        ("elasticsearch_dsl/search.py", 12, False),
+        ("elasticsearch_dsl/aggs.py", 11, False),
+        ("test_elasticsearch_dsl/test_result.py", 5, False),
+        ("elasticsearch_dsl/result.py", 3, False),
+    ] == r.facets.files
+
+    assert [
+        (datetime(2014, 3, 3, 0, 0), 2, False),
+        (datetime(2014, 3, 4, 0, 0), 1, False),
+        (datetime(2014, 3, 5, 0, 0), 3, False),
+        (datetime(2014, 3, 6, 0, 0), 3, False),
+        (datetime(2014, 3, 7, 0, 0), 9, False),
+        (datetime(2014, 3, 10, 0, 0), 2, False),
+        (datetime(2014, 3, 15, 0, 0), 4, False),
+        (datetime(2014, 3, 21, 0, 0), 2, False),
+        (datetime(2014, 3, 23, 0, 0), 2, False),
+        (datetime(2014, 3, 24, 0, 0), 10, False),
+        (datetime(2014, 4, 20, 0, 0), 2, False),
+        (datetime(2014, 4, 22, 0, 0), 2, False),
+        (datetime(2014, 4, 25, 0, 0), 3, False),
+        (datetime(2014, 4, 26, 0, 0), 2, False),
+        (datetime(2014, 4, 27, 0, 0), 2, False),
+        (datetime(2014, 5, 1, 0, 0), 2, False),
+        (datetime(2014, 5, 2, 0, 0), 1, False),
+    ] == r.facets.frequency
+
+    assert [
+        ("ok", 19, False),
+        ("good", 14, False),
+        ("better", 19, False),
+    ] == r.facets.deletions
+
+
+@pytest.mark.sync
+def test_term_filters_are_shown_as_selected_and_data_is_filtered(
+    data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch]
+) -> None:
+    cs = commit_search_cls(filters={"files": "test_elasticsearch_dsl"})
+
+    r = cs.execute()
+
+    assert 35 == r.hits.total.value  # type: ignore[attr-defined]
+    assert [
+        ("elasticsearch_dsl", 40, False),
+        ("test_elasticsearch_dsl", 35, True),  # selected
+        ("elasticsearch_dsl/query.py", 19, False),
+        ("test_elasticsearch_dsl/test_search.py", 15, False),
+        ("elasticsearch_dsl/utils.py", 14, False),
+        ("test_elasticsearch_dsl/test_query.py", 13, False),
+        ("elasticsearch_dsl/search.py", 12, False),
+        ("elasticsearch_dsl/aggs.py", 11, False),
+        ("test_elasticsearch_dsl/test_result.py", 5, False),
+        ("elasticsearch_dsl/result.py", 3, False),
+    ] == r.facets.files
+
+    assert [
+        (datetime(2014, 3, 3, 0, 0), 1, False),
+        (datetime(2014, 3, 5, 0, 0), 2, False),
+        (datetime(2014, 3, 6, 0, 0), 3, False),
+        (datetime(2014, 3, 7, 0, 0), 6, False),
+        (datetime(2014, 3, 10, 0, 0), 1, False),
+        (datetime(2014, 3, 15, 0, 0), 3, False),
+        (datetime(2014, 3, 21, 0, 0), 2, False),
+        (datetime(2014, 3, 23, 0, 0), 1, False),
+        (datetime(2014, 3, 24, 0, 0), 7, False),
+        (datetime(2014, 4, 20, 0, 0), 1, False),
+        (datetime(2014, 4, 25, 0, 0), 3, False),
+        (datetime(2014, 4, 26, 0, 0), 2, False),
+        (datetime(2014, 4, 27, 0, 0), 1, False),
+        (datetime(2014, 5, 1, 0, 0), 1, False),
+        (datetime(2014, 5, 2, 0, 0), 1, False),
+    ] == r.facets.frequency
+
+    assert [
+        ("ok", 12, False),
+        ("good", 10, False),
+        ("better", 13, False),
+    ] == r.facets.deletions
+
+
+@pytest.mark.sync
+def test_range_filters_are_shown_as_selected_and_data_is_filtered(
+    data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch]
+) -> None:
+    cs = commit_search_cls(filters={"deletions": "better"})
+
+    r = cs.execute()
+
+    assert 19 == r.hits.total.value  # type: ignore[attr-defined]
+
+
+@pytest.mark.sync
+def test_pagination(
+    data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch]
+) -> None:
+    cs = commit_search_cls()
+    cs = cs[0:20]
+
+    assert 52 == cs.count()
+    assert 20 == len(cs.execute())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,160 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import (
+    ComposableIndexTemplate,
+    Date,
+    Document,
+    Index,
+    IndexTemplate,
+    Text,
+    analysis,
+)
+
+
+class Post(Document):
+    title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword"))
+    published_from = Date()
+
+
+@pytest.mark.sync
+def test_index_template_works(write_client: Elasticsearch) -> None:
+    it = IndexTemplate("test-template", "test-legacy-*")
+    it.document(Post)
+    it.settings(number_of_replicas=0, number_of_shards=1)
+    it.save()
+
+    i = Index("test-legacy-blog")
+    i.create()
+
+    assert {
+        "test-legacy-blog": {
+            "mappings": {
+                "properties": {
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                    "published_from": {"type": "date"},
+                }
+            }
+        }
+    } == write_client.indices.get_mapping(index="test-legacy-blog")
+
+
+@pytest.mark.sync
+def test_composable_index_template_works(
+    write_client: Elasticsearch,
+) -> None:
+    it = ComposableIndexTemplate("test-template", "test-*")
+    it.document(Post)
+    it.settings(number_of_replicas=0, number_of_shards=1)
+    it.save()
+
+    i = Index("test-blog")
+    i.create()
+
+    assert {
+        "test-blog": {
+            "mappings": {
+                "properties": {
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                    "published_from": {"type": "date"},
+                }
+            }
+        }
+    } == write_client.indices.get_mapping(index="test-blog")
+
+
+@pytest.mark.sync
+def test_index_can_be_saved_even_with_settings(
+    write_client: Elasticsearch,
+) -> None:
+    i = Index("test-blog", using=write_client)
+    i.settings(number_of_shards=3, number_of_replicas=0)
+    i.save()
+    i.settings(number_of_replicas=1)
+    i.save()
+
+    assert (
+        "1"
+        == (i.get_settings())["test-blog"]["settings"]["index"]["number_of_replicas"]
+    )
+
+
+@pytest.mark.sync
+def test_index_exists(data_client: Elasticsearch) -> None:
+    assert Index("git").exists()
+    assert not Index("not-there").exists()
+
+
+@pytest.mark.sync
+def test_index_can_be_created_with_settings_and_mappings(
+    write_client: Elasticsearch,
+) -> None:
+    i = Index("test-blog", using=write_client)
+    i.document(Post)
+    i.settings(number_of_replicas=0, number_of_shards=1)
+    i.create()
+
+    assert {
+        "test-blog": {
+            "mappings": {
+                "properties": {
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                    "published_from": {"type": "date"},
+                }
+            }
+        }
+    } == write_client.indices.get_mapping(index="test-blog")
+
+    settings = write_client.indices.get_settings(index="test-blog")
+    assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0"
+    assert settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1"
+    assert settings["test-blog"]["settings"]["index"]["analysis"] == {
+        "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}
+    }
+
+
+@pytest.mark.sync
+def test_delete(write_client: Elasticsearch) -> None:
+    write_client.indices.create(
+        index="test-index",
+        body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}},
+    )
+
+    i = Index("test-index", using=write_client)
+    i.delete()
+    assert not write_client.indices.exists(index="test-index")
+
+
+@pytest.mark.sync
+def test_multiple_indices_with_same_doc_type_work(
+    write_client: Elasticsearch,
+) -> None:
+    i1 = Index("test-index-1", using=write_client)
+    i2 = Index("test-index-2", using=write_client)
+
+    for i in (i1, i2):
+        i.document(Post)
+        i.create()
+
+    for j in ("test-index-1", "test-index-2"):
+        settings = write_client.indices.get_settings(index=j)
+        assert settings[j]["settings"]["index"]["analysis"] == {
+            "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}
+        }
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,169 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+from pytest import raises
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import Mapping, analysis, exceptions
+
+
+@pytest.mark.sync
+def test_mapping_saved_into_es(write_client: Elasticsearch) -> None:
+    m = Mapping()
+    m.field(
+        "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")
+    )
+    m.field("tags", "keyword")
+    m.save("test-mapping", using=write_client)
+
+    assert {
+        "test-mapping": {
+            "mappings": {
+                "properties": {
+                    "name": {"type": "text", "analyzer": "my_analyzer"},
+                    "tags": {"type": "keyword"},
+                }
+            }
+        }
+    } == write_client.indices.get_mapping(index="test-mapping")
+
+
+@pytest.mark.sync
+def test_mapping_saved_into_es_when_index_already_exists_closed(
+    write_client: Elasticsearch,
+) -> None:
+    m = Mapping()
+    m.field(
+        "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")
+    )
+    write_client.indices.create(index="test-mapping")
+
+    with raises(exceptions.IllegalOperation):
+        m.save("test-mapping", using=write_client)
+
+    write_client.cluster.health(index="test-mapping", wait_for_status="yellow")
+    write_client.indices.close(index="test-mapping")
+    m.save("test-mapping", using=write_client)
+
+    assert {
+        "test-mapping": {
+            "mappings": {
+                "properties": {"name": {"type": "text", "analyzer": "my_analyzer"}}
+            }
+        }
+    } == write_client.indices.get_mapping(index="test-mapping")
+
+
+@pytest.mark.sync
+def test_mapping_saved_into_es_when_index_already_exists_with_analysis(
+    write_client: Elasticsearch,
+) -> None:
+    m = Mapping()
+    analyzer = analysis.analyzer("my_analyzer", tokenizer="keyword")
+    m.field("name", "text", analyzer=analyzer)
+
+    new_analysis = analyzer.get_analysis_definition()
+    new_analysis["analyzer"]["other_analyzer"] = {
+        "type": "custom",
+        "tokenizer": "whitespace",
+    }
+    write_client.indices.create(
+        index="test-mapping", body={"settings": {"analysis": new_analysis}}
+    )
+
+    m.field("title", "text", analyzer=analyzer)
+    m.save("test-mapping", using=write_client)
+
+    assert {
+        "test-mapping": {
+            "mappings": {
+                "properties": {
+                    "name": {"type": "text", "analyzer": "my_analyzer"},
+                    "title": {"type": "text", "analyzer": "my_analyzer"},
+                }
+            }
+        }
+    } == write_client.indices.get_mapping(index="test-mapping")
+
+
+@pytest.mark.sync
+def test_mapping_gets_updated_from_es(
+    write_client: Elasticsearch,
+) -> None:
+    write_client.indices.create(
+        index="test-mapping",
+        body={
+            "settings": {"number_of_shards": 1, "number_of_replicas": 0},
+            "mappings": {
+                "date_detection": False,
+                "properties": {
+                    "title": {
+                        "type": "text",
+                        "analyzer": "snowball",
+                        "fields": {"raw": {"type": "keyword"}},
+                    },
+                    "created_at": {"type": "date"},
+                    "comments": {
+                        "type": "nested",
+                        "properties": {
+                            "created": {"type": "date"},
+                            "author": {
+                                "type": "text",
+                                "analyzer": "snowball",
+                                "fields": {"raw": {"type": "keyword"}},
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    )
+
+    m = Mapping.from_es("test-mapping", using=write_client)
+
+    assert ["comments", "created_at", "title"] == list(
+        sorted(m.properties.properties._d_.keys())  # type: ignore[attr-defined]
+    )
+    assert {
+        "date_detection": False,
+        "properties": {
+            "comments": {
+                "type": "nested",
+                "properties": {
+                    "created": {"type": "date"},
+                    "author": {
+                        "analyzer": "snowball",
+                        "fields": {"raw": {"type": "keyword"}},
+                        "type": "text",
+                    },
+                },
+            },
+            "created_at": {"type": "date"},
+            "title": {
+                "analyzer": "snowball",
+                "fields": {"raw": {"type": "keyword"}},
+                "type": "text",
+            },
+        },
+    } == m.to_dict()
+
+    # test same with alias
+    write_client.indices.put_alias(index="test-mapping", name="test-alias")
+
+    m2 = Mapping.from_es("test-alias", using=write_client)
+    assert m2.to_dict() == m.to_dict()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,302 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+import pytest
+from pytest import raises
+
+from elasticsearch import ApiError, Elasticsearch
+from elasticsearch.dsl import (
+    Date,
+    Document,
+    Keyword,
+    MultiSearch,
+    Q,
+    Search,
+    Text,
+)
+from elasticsearch.dsl.response import aggs
+
+from ..test_data import FLAT_DATA
+
+
+class Repository(Document):
+    created_at = Date()
+    description = Text(analyzer="snowball")
+    tags = Keyword()
+
+    @classmethod
+    def search(cls) -> Search["Repository"]:  # type: ignore[override]
+        return super().search().filter("term", commit_repo="repo")
+
+    class Index:
+        name = "git"
+
+
+class Commit(Document):
+    class Index:
+        name = "flat-git"
+
+
+@pytest.mark.sync
+def test_filters_aggregation_buckets_are_accessible(
+    data_client: Elasticsearch,
+) -> None:
+    has_tests_query = Q("term", files="test_elasticsearch_dsl")
+    s = Commit.search()[0:0]
+    s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket(
+        "has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query}
+    ).metric("lines", "stats", field="stats.lines")
+
+    response = s.execute()
+
+    assert isinstance(
+        response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket
+    )
+    assert (
+        35
+        == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count
+    )
+    assert (
+        228
+        == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max
+    )
+
+
+@pytest.mark.sync
+def test_top_hits_are_wrapped_in_response(
+    data_client: Elasticsearch,
+) -> None:
+    s = Commit.search()[0:0]
+    s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric(
+        "top_commits", "top_hits", size=5
+    )
+    response = s.execute()
+
+    top_commits = response.aggregations.top_authors.buckets[0].top_commits
+    assert isinstance(top_commits, aggs.TopHitsData)
+    assert 5 == len(top_commits)
+
+    hits = [h for h in top_commits]
+    assert 5 == len(hits)
+    assert isinstance(hits[0], Commit)
+
+
+@pytest.mark.sync
+def test_inner_hits_are_wrapped_in_response(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="git")[0:1].query(
+        "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
+    )
+    response = s.execute()
+
+    commit = response.hits[0]
+    assert isinstance(commit.meta.inner_hits.repo, response.__class__)
+    assert repr(commit.meta.inner_hits.repo[0]).startswith(
+        "<Hit(git/elasticsearch-dsl-py): "
+    )
+
+
+@pytest.mark.sync
+def test_inner_hits_are_serialized_to_dict(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="git")[0:1].query(
+        "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
+    )
+    response = s.execute()
+    d = response.to_dict(recursive=True)
+    assert isinstance(d, dict)
+    assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict)
+
+    # iterating over the results changes the format of the internal AttrDict
+    for hit in response:
+        pass
+
+    d = response.to_dict(recursive=True)
+    assert isinstance(d, dict)
+    assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict)
+
+
+@pytest.mark.sync
+def test_scan_respects_doc_types(data_client: Elasticsearch) -> None:
+    repos = [repo for repo in Repository.search().scan()]
+
+    assert 1 == len(repos)
+    assert isinstance(repos[0], Repository)
+    assert repos[0].organization == "elasticsearch"
+
+
+@pytest.mark.sync
+def test_scan_iterates_through_all_docs(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="flat-git")
+
+    commits = [commit for commit in s.scan()]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.sync
+def test_search_after(data_client: Elasticsearch) -> None:
+    page_size = 7
+    s = Search(index="flat-git")[:page_size].sort("authored_date")
+    commits = []
+    while True:
+        r = s.execute()
+        commits += r.hits
+        if len(r.hits) < page_size:
+            break
+        s = s.search_after()
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
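+# Editorial sketch (not collected by pytest): the search_after pagination loop
+# from test_search_after, factored into a reusable generator. It assumes an
+# index with a sortable "authored_date" field, like the "flat-git" fixture.
+def _iterate_with_search_after(index: str, page_size: int = 7):
+    s = Search(index=index)[:page_size].sort("authored_date")
+    while True:
+        r = s.execute()
+        yield from r.hits
+        if len(r.hits) < page_size:
+            return
+        # returns a new Search seeded with the last hit's sort values
+        s = s.search_after()
+
+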
+@pytest.mark.sync
+def test_search_after_no_search(data_client: Elasticsearch) -> None:
+    s = Search(index="flat-git")
+    with raises(
+        ValueError, match="A search must be executed before using search_after"
+    ):
+        s.search_after()
+    s.count()
+    with raises(
+        ValueError, match="A search must be executed before using search_after"
+    ):
+        s.search_after()
+
+
+@pytest.mark.sync
+def test_search_after_no_sort(data_client: Elasticsearch) -> None:
+    s = Search(index="flat-git")
+    r = s.execute()
+    with raises(
+        ValueError, match="Cannot use search_after when results are not sorted"
+    ):
+        r.search_after()
+
+
+@pytest.mark.sync
+def test_search_after_no_results(data_client: Elasticsearch) -> None:
+    s = Search(index="flat-git")[:100].sort("authored_date")
+    r = s.execute()
+    assert 52 == len(r.hits)
+    s = s.search_after()
+    r = s.execute()
+    assert 0 == len(r.hits)
+    with raises(
+        ValueError, match="Cannot use search_after when there are no search results"
+    ):
+        r.search_after()
+
+
+@pytest.mark.sync
+def test_point_in_time(data_client: Elasticsearch) -> None:
+    page_size = 7
+    commits = []
+    with Search(index="flat-git")[:page_size].point_in_time(keep_alive="30s") as s:
+        pit_id = s._extra["pit"]["id"]
+        while True:
+            r = s.execute()
+            commits += r.hits
+            if len(r.hits) < page_size:
+                break
+            s = s.search_after()
+            assert pit_id == s._extra["pit"]["id"]
+            assert "30s" == s._extra["pit"]["keep_alive"]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
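+# Editorial sketch (not collected by pytest): the same pagination loop under a
+# point-in-time context; the PIT is opened on entry and closed automatically on
+# exit, while search_after() keeps paging within it, as in test_point_in_time.
+def _iterate_with_pit(index: str, page_size: int = 7):
+    with Search(index=index)[:page_size].point_in_time(keep_alive="30s") as s:
+        while True:
+            r = s.execute()
+            yield from r.hits
+            if len(r.hits) < page_size:
+                return
+            s = s.search_after()
+
+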
+@pytest.mark.sync
+def test_iterate(data_client: Elasticsearch) -> None:
+    s = Search(index="flat-git")
+
+    commits = [commit for commit in s.iterate()]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.sync
+def test_response_is_cached(data_client: Elasticsearch) -> None:
+    s = Repository.search()
+    repos = [repo for repo in s]
+
+    assert hasattr(s, "_response")
+    assert s._response.hits == repos
+
+
+@pytest.mark.sync
+def test_multi_search(data_client: Elasticsearch) -> None:
+    s1 = Repository.search()
+    s2 = Search[Repository](index="flat-git")
+
+    ms = MultiSearch[Repository]()
+    ms = ms.add(s1).add(s2)
+
+    r1, r2 = ms.execute()
+
+    assert 1 == len(r1)
+    assert isinstance(r1[0], Repository)
+    assert r1._search is s1
+
+    assert 52 == r2.hits.total.value  # type: ignore[attr-defined]
+    assert r2._search is s2
+
+
+@pytest.mark.sync
+def test_multi_missing(data_client: Elasticsearch) -> None:
+    s1 = Repository.search()
+    s2 = Search[Repository](index="flat-git")
+    s3 = Search[Repository](index="does_not_exist")
+
+    ms = MultiSearch[Repository]()
+    ms = ms.add(s1).add(s2).add(s3)
+
+    with raises(ApiError):
+        ms.execute()
+
+    r1, r2, r3 = ms.execute(raise_on_error=False)
+
+    assert 1 == len(r1)
+    assert isinstance(r1[0], Repository)
+    assert r1._search is s1
+
+    assert 52 == r2.hits.total.value  # type: ignore[attr-defined]
+    assert r2._search is s2
+
+    assert r3 is None
+
+
+@pytest.mark.sync
+def test_raw_subfield_can_be_used_in_aggs(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="git")[0:0]
+    s.aggs.bucket("authors", "terms", field="author.name.raw", size=1)
+
+    r = s.execute()
+
+    authors = r.aggregations.authors
+    assert 1 == len(authors)
+    assert {"key": "Honza Král", "doc_count": 52} == authors[0]
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,85 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import UpdateByQuery
+from elasticsearch.dsl.search import Q
+
+
+@pytest.mark.sync
+def test_update_by_query_no_script(
+    write_client: Elasticsearch, setup_ubq_tests: str
+) -> None:
+    index = setup_ubq_tests
+
+    ubq = (
+        UpdateByQuery(using=write_client)
+        .index(index)
+        .filter(~Q("exists", field="is_public"))
+    )
+    response = ubq.execute()
+
+    assert response.total == 52
+    assert response["took"] > 0
+    assert not response.timed_out
+    assert response.updated == 52
+    assert response.deleted == 0
+    assert response.took > 0
+    assert response.success()
+
+
+@pytest.mark.sync
+def test_update_by_query_with_script(
+    write_client: Elasticsearch, setup_ubq_tests: str
+) -> None:
+    index = setup_ubq_tests
+
+    ubq = (
+        UpdateByQuery(using=write_client)
+        .index(index)
+        .filter(~Q("exists", field="parent_shas"))
+        .script(source="ctx._source.is_public = false")
+    )
+    ubq = ubq.params(conflicts="proceed")
+
+    response = ubq.execute()
+    assert response.total == 2
+    assert response.updated == 2
+    assert response.version_conflicts == 0
+
+
+@pytest.mark.sync
+def test_delete_by_query_with_script(
+    write_client: Elasticsearch, setup_ubq_tests: str
+) -> None:
+    index = setup_ubq_tests
+
+    ubq = (
+        UpdateByQuery(using=write_client)
+        .index(index)
+        .filter(Q("match", parent_shas="1dd19210b5be92b960f7db6f66ae526288edccc3"))
+        .script(source='ctx.op = "delete"')
+    )
+    ubq = ubq.params(conflicts="proceed")
+
+    response = ubq.execute()
+
+    assert response.total == 1
+    assert response.deleted == 1
+    assert response.success()
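+
+
+# Editorial sketch (not collected by pytest): the filter + script + conflicts
+# pattern exercised above, written as a small helper. The index name is
+# whatever the caller passes in; nothing here is part of the fixtures.
+def _mark_documents_private(client: Elasticsearch, index: str) -> int:
+    ubq = (
+        UpdateByQuery(using=client)
+        .index(index)
+        .filter(~Q("exists", field="is_public"))
+        .script(source="ctx._source.is_public = false")
+        .params(conflicts="proceed")
+    )
+    response = ubq.execute()
+    # number of documents the update-by-query actually rewrote
+    return response.updated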
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_count.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_count.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_count.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_count.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,45 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl.search import Q, Search
+
+
+def test_count_all(data_client: Elasticsearch) -> None:
+    s = Search(using=data_client).index("git")
+    assert 53 == s.count()
+
+
+def test_count_prefetch(data_client: Elasticsearch, mocker: Any) -> None:
+    mocker.spy(data_client, "count")
+
+    search = Search(using=data_client).index("git")
+    search.execute()
+    assert search.count() == 53
+    assert data_client.count.call_count == 0  # type: ignore[attr-defined]
+
+    search._response.hits.total.relation = "gte"  # type: ignore[attr-defined]
+    assert search.count() == 53
+    assert data_client.count.call_count == 1  # type: ignore[attr-defined]
+
+
+def test_count_filter(data_client: Elasticsearch) -> None:
+    s = Search(using=data_client).index("git").filter(~Q("exists", field="parent_shas"))
+    # initial commit + repo document
+    assert 2 == s.count()
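+
+
+# Editorial note: count() issues a lightweight _count request and fetches no
+# hits; after execute(), the cached hit total is reused as long as the total
+# relation is exact ("eq"), which is what the spy in test_count_prefetch
+# verifies once the relation is flipped to "gte". A minimal sketch:
+#
+#     s = Search(using=client).index("git").query("match", description="fix")
+#     n = s.count()  # only the count is returned, no documents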
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_data.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_data.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_data.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_data.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1093 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, Dict
+
+from elasticsearch import Elasticsearch
+
+user_mapping = {
+    "properties": {"name": {"type": "text", "fields": {"raw": {"type": "keyword"}}}}
+}
+
+FLAT_GIT_INDEX: Dict[str, Any] = {
+    "settings": {
+        # custom analyzer for analyzing file paths
+        "analysis": {
+            "analyzer": {
+                "file_path": {
+                    "type": "custom",
+                    "tokenizer": "path_hierarchy",
+                    "filter": ["lowercase"],
+                }
+            }
+        },
+    },
+    "mappings": {
+        "properties": {
+            "description": {"type": "text", "analyzer": "snowball"},
+            "author": user_mapping,
+            "authored_date": {"type": "date"},
+            "committer": user_mapping,
+            "committed_date": {"type": "date"},
+            "parent_shas": {"type": "keyword"},
+            "files": {
+                "type": "text",
+                "analyzer": "file_path",
+                "fielddata": True,
+            },
+        }
+    },
+}
+
+GIT_INDEX: Dict[str, Any] = {
+    "settings": {
+        # custom analyzer for analyzing file paths
+        "analysis": {
+            "analyzer": {
+                "file_path": {
+                    "type": "custom",
+                    "tokenizer": "path_hierarchy",
+                    "filter": ["lowercase"],
+                }
+            }
+        },
+    },
+    "mappings": {
+        "properties": {
+            # common fields
+            "description": {"type": "text", "analyzer": "snowball"},
+            "commit_repo": {"type": "join", "relations": {"repo": "commit"}},
+            # COMMIT mappings
+            "author": user_mapping,
+            "authored_date": {"type": "date"},
+            "committer": user_mapping,
+            "committed_date": {"type": "date"},
+            "parent_shas": {"type": "keyword"},
+            "files": {
+                "type": "text",
+                "analyzer": "file_path",
+                "fielddata": True,
+            },
+            # REPO mappings
+            "is_public": {"type": "boolean"},
+            "owner": user_mapping,
+            "created_at": {"type": "date"},
+            "tags": {"type": "keyword"},
+        }
+    },
+}
+
+
+def create_flat_git_index(client: Elasticsearch, index: str) -> None:
+    client.indices.create(index=index, body=FLAT_GIT_INDEX)
+
+
+def create_git_index(client: Elasticsearch, index: str) -> None:
+    client.indices.create(index=index, body=GIT_INDEX)
+
+
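+# Editorial sketch (hypothetical ids, not part of DATA below): with the join
+# field defined in GIT_INDEX, a repository document is indexed as the parent
+# and each commit is indexed with routing pointing at its repository.
+def _index_parent_and_child(client: Elasticsearch) -> None:
+    client.index(
+        index="git",
+        id="example-repo",
+        document={"commit_repo": "repo", "owner": {"name": "example"}},
+    )
+    client.index(
+        index="git",
+        id="example-commit",
+        routing="example-repo",
+        document={
+            "commit_repo": {"name": "commit", "parent": "example-repo"},
+            "description": "example commit",
+        },
+    )
+
+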
+DATA = [
+    # repository
+    {
+        "_id": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": "repo",
+            "organization": "elasticsearch",
+            "created_at": "2014-03-03",
+            "owner": {"name": "elasticsearch"},
+            "is_public": True,
+        },
+        "_index": "git",
+    },
+    # documents
+    {
+        "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_aggs.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 7, "insertions": 23, "lines": 30, "files": 4},
+            "description": "Make sure buckets aren't modified in-place",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["eb3e543323f189fd7b698e66295427204fff5755"],
+            "committed_date": "2014-05-02T13:47:19",
+            "authored_date": "2014-05-02T13:47:19.123+02:00",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "eb3e543323f189fd7b698e66295427204fff5755",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/search.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 18, "lines": 18, "files": 1},
+            "description": "Add communication with ES server",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["dd15b6ba17dd9ba16363a51f85b31f66f1fb1157"],
+            "committed_date": "2014-05-01T13:32:14",
+            "authored_date": "2014-05-01T13:32:14",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "dd15b6ba17dd9ba16363a51f85b31f66f1fb1157",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/utils.py",
+                "test_elasticsearch_dsl/test_result.py",
+                "elasticsearch_dsl/result.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 18, "insertions": 44, "lines": 62, "files": 3},
+            "description": "Minor cleanup and adding helpers for interactive python",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["ed19caf25abd25300e707fadf3f81b05c5673446"],
+            "committed_date": "2014-05-01T13:30:44",
+            "authored_date": "2014-05-01T13:30:44",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "ed19caf25abd25300e707fadf3f81b05c5673446",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 28, "lines": 28, "files": 3},
+            "description": "Make sure aggs do copy-on-write",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["583e52c71e9a72c1b291ec5843683d8fa8f1ce2d"],
+            "committed_date": "2014-04-27T16:28:09",
+            "authored_date": "2014-04-27T16:28:09",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "583e52c71e9a72c1b291ec5843683d8fa8f1ce2d",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/aggs.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 1, "lines": 2, "files": 1},
+            "description": "Use __setitem__ from DslBase in AggsBase",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["1dd19210b5be92b960f7db6f66ae526288edccc3"],
+            "committed_date": "2014-04-27T15:51:53",
+            "authored_date": "2014-04-27T15:51:53",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "1dd19210b5be92b960f7db6f66ae526288edccc3",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_search.py",
+                "elasticsearch_dsl/search.py",
+                "elasticsearch_dsl/filter.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 21, "insertions": 98, "lines": 119, "files": 5},
+            "description": "Have Search clone itself on any change besides aggs",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["b4c9e29376af2e42a4e6dc153f0f293b1a18bac3"],
+            "committed_date": "2014-04-26T14:49:43",
+            "authored_date": "2014-04-26T14:49:43",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "b4c9e29376af2e42a4e6dc153f0f293b1a18bac3",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["test_elasticsearch_dsl/test_result.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 5, "lines": 5, "files": 1},
+            "description": "Add tests for [] on response",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["a64a54181b232bb5943bd16960be9416e402f5f5"],
+            "committed_date": "2014-04-26T13:56:52",
+            "authored_date": "2014-04-26T13:56:52",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "a64a54181b232bb5943bd16960be9416e402f5f5",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["test_elasticsearch_dsl/test_result.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 7, "lines": 8, "files": 1},
+            "description": "Test access to missing fields raises appropriate exceptions",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["df3f778a3d37b170bde6979a4ef2d9e3e6400778"],
+            "committed_date": "2014-04-25T16:01:07",
+            "authored_date": "2014-04-25T16:01:07",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "df3f778a3d37b170bde6979a4ef2d9e3e6400778",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/utils.py",
+                "test_elasticsearch_dsl/test_result.py",
+                "elasticsearch_dsl/result.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 8, "insertions": 31, "lines": 39, "files": 3},
+            "description": "Support attribute access even for inner/nested objects",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["7e599e116b5ff5d271ce3fe1ebc80e82ab3d5925"],
+            "committed_date": "2014-04-25T15:59:02",
+            "authored_date": "2014-04-25T15:59:02",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "7e599e116b5ff5d271ce3fe1ebc80e82ab3d5925",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "test_elasticsearch_dsl/test_result.py",
+                "elasticsearch_dsl/result.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 149, "lines": 149, "files": 2},
+            "description": "Added a prototype of a Respose and Result classes",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["e2882d28cb8077eaa3e5d8ae76543482d4d90f7e"],
+            "committed_date": "2014-04-25T15:12:15",
+            "authored_date": "2014-04-25T15:12:15",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "e2882d28cb8077eaa3e5d8ae76543482d4d90f7e",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["docs/index.rst"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 6, "lines": 6, "files": 1},
+            "description": "add warning to the docs",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["51f94d83d1c47d3b81207736ca97a1ec6302678f"],
+            "committed_date": "2014-04-22T19:16:21",
+            "authored_date": "2014-04-22T19:16:21",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "51f94d83d1c47d3b81207736ca97a1ec6302678f",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/utils.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 3, "insertions": 29, "lines": 32, "files": 1},
+            "description": "Add some comments to the code",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["0950f6c600b49e2bf012d03b02250fb71c848555"],
+            "committed_date": "2014-04-22T19:12:06",
+            "authored_date": "2014-04-22T19:12:06",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "0950f6c600b49e2bf012d03b02250fb71c848555",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["README.rst"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 6, "lines": 6, "files": 1},
+            "description": "Added a WIP warning",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["54d058f5ac6be8225ef61d5529772aada42ec6c8"],
+            "committed_date": "2014-04-20T00:19:25",
+            "authored_date": "2014-04-20T00:19:25",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "54d058f5ac6be8225ef61d5529772aada42ec6c8",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/__init__.py",
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 36, "insertions": 7, "lines": 43, "files": 3},
+            "description": "Remove the operator kwarg from .query",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["4cb07845e45787abc1f850c0b561e487e0034424"],
+            "committed_date": "2014-04-20T00:17:25",
+            "authored_date": "2014-04-20T00:17:25",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "4cb07845e45787abc1f850c0b561e487e0034424",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 35, "insertions": 49, "lines": 84, "files": 2},
+            "description": "Complex example",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["578abe80f76aafd7e81fe46a44403e601733a938"],
+            "committed_date": "2014-03-24T20:48:45",
+            "authored_date": "2014-03-24T20:48:45",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "578abe80f76aafd7e81fe46a44403e601733a938",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["test_elasticsearch_dsl/test_search.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 2, "insertions": 0, "lines": 2, "files": 1},
+            "description": "removing extra whitespace",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["ecb84f03565940c7d294dbc80723420dcfbab340"],
+            "committed_date": "2014-03-24T20:42:23",
+            "authored_date": "2014-03-24T20:42:23",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "ecb84f03565940c7d294dbc80723420dcfbab340",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["test_elasticsearch_dsl/test_search.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 3, "lines": 4, "files": 1},
+            "description": "Make sure attribute access works for .query on Search",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["9a247c876ab66e2bca56b25f392d054e613b1b2a"],
+            "committed_date": "2014-03-24T20:35:02",
+            "authored_date": "2014-03-24T20:34:46",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "9a247c876ab66e2bca56b25f392d054e613b1b2a",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/search.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 2, "lines": 2, "files": 1},
+            "description": "Make sure .index and .doc_type methods are chainable",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["cee5e46947d510a49edd3609ff91aab7b1f3ac89"],
+            "committed_date": "2014-03-24T20:27:46",
+            "authored_date": "2014-03-24T20:27:46",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "cee5e46947d510a49edd3609ff91aab7b1f3ac89",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+                "elasticsearch_dsl/filter.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 13, "insertions": 128, "lines": 141, "files": 3},
+            "description": "Added .filter and .post_filter to Search",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["1d6857182b09a556d58c6bc5bdcb243092812ba3"],
+            "committed_date": "2014-03-24T20:26:57",
+            "authored_date": "2014-03-24T20:26:57",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "1d6857182b09a556d58c6bc5bdcb243092812ba3",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 24, "insertions": 29, "lines": 53, "files": 2},
+            "description": "Extracted combination logic into DslBase",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["4ad92f15a1955846c01642318303a821e8435b75"],
+            "committed_date": "2014-03-24T20:03:51",
+            "authored_date": "2014-03-24T20:03:51",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "4ad92f15a1955846c01642318303a821e8435b75",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 43, "insertions": 45, "lines": 88, "files": 2},
+            "description": "Extracted bool-related logic to a mixin to be reused by filters",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["6eb39dc2825605543ac1ed0b45b9b6baeecc44c2"],
+            "committed_date": "2014-03-24T19:16:16",
+            "authored_date": "2014-03-24T19:16:16",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "6eb39dc2825605543ac1ed0b45b9b6baeecc44c2",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 32, "lines": 33, "files": 2},
+            "description": "Enable otheroperators when querying on Search object",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["be094c7b307332cb6039bf9a7c984d2c7593ddff"],
+            "committed_date": "2014-03-24T18:25:10",
+            "authored_date": "2014-03-24T18:25:10",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "be094c7b307332cb6039bf9a7c984d2c7593ddff",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/utils.py",
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 23, "insertions": 35, "lines": 58, "files": 3},
+            "description": "make sure query operations always return copies",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["b2576e3b6437e2cb9d8971fee4ead60df91fd75b"],
+            "committed_date": "2014-03-24T18:10:37",
+            "authored_date": "2014-03-24T18:03:13",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "b2576e3b6437e2cb9d8971fee4ead60df91fd75b",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 53, "lines": 54, "files": 2},
+            "description": "Adding or operator for queries",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["1be002170ac3cd59d2e97824b83b88bb3c9c60ed"],
+            "committed_date": "2014-03-24T17:53:38",
+            "authored_date": "2014-03-24T17:53:38",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "1be002170ac3cd59d2e97824b83b88bb3c9c60ed",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 35, "lines": 35, "files": 2},
+            "description": "Added inverting of queries",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["24e1e38b2f704f65440d96c290b7c6cd54c2e00e"],
+            "committed_date": "2014-03-23T17:44:36",
+            "authored_date": "2014-03-23T17:44:36",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "24e1e38b2f704f65440d96c290b7c6cd54c2e00e",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/aggs.py", "elasticsearch_dsl/utils.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 5, "insertions": 1, "lines": 6, "files": 2},
+            "description": "Change equality checks to use .to_dict()",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["277cfaedbaf3705ed74ad6296227e1172c97a63f"],
+            "committed_date": "2014-03-23T17:43:01",
+            "authored_date": "2014-03-23T17:43:01",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "277cfaedbaf3705ed74ad6296227e1172c97a63f",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 11, "lines": 12, "files": 2},
+            "description": "Test combining of bool queries",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["6aa3868a6a9f35f71553ce96f9d3d63c74d054fd"],
+            "committed_date": "2014-03-21T15:15:06",
+            "authored_date": "2014-03-21T15:15:06",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "6aa3868a6a9f35f71553ce96f9d3d63c74d054fd",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 23, "lines": 24, "files": 2},
+            "description": "Adding & operator for queries",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["bb311eb35e7eb53fb5ae01e3f80336866c7e3e37"],
+            "committed_date": "2014-03-21T15:10:08",
+            "authored_date": "2014-03-21T15:10:08",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "bb311eb35e7eb53fb5ae01e3f80336866c7e3e37",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/utils.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 1, "insertions": 4, "lines": 5, "files": 2},
+            "description": "Don't serialize empty typed fields into dict",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["aea8ea9e421bd53a5b058495e68c3fd57bb1dacc"],
+            "committed_date": "2014-03-15T16:29:37",
+            "authored_date": "2014-03-15T16:29:37",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "aea8ea9e421bd53a5b058495e68c3fd57bb1dacc",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/utils.py",
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 3, "insertions": 37, "lines": 40, "files": 3},
+            "description": "Bool queries, when combining just adds their params together",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["a8819a510b919be43ff3011b904f257798fb8916"],
+            "committed_date": "2014-03-15T16:16:40",
+            "authored_date": "2014-03-15T16:16:40",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "a8819a510b919be43ff3011b904f257798fb8916",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["test_elasticsearch_dsl/run_tests.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 6, "insertions": 2, "lines": 8, "files": 1},
+            "description": "Simpler run_tests.py",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["e35792a725be2325fc54d3fcb95a7d38d8075a99"],
+            "committed_date": "2014-03-15T16:02:21",
+            "authored_date": "2014-03-15T16:02:21",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "e35792a725be2325fc54d3fcb95a7d38d8075a99",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/aggs.py", "elasticsearch_dsl/query.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 2, "insertions": 2, "lines": 4, "files": 2},
+            "description": "Maku we don't treat shortcuts as methods.",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["3179d778dc9e3f2883d5f7ffa63b9ae0399c16bc"],
+            "committed_date": "2014-03-15T15:59:21",
+            "authored_date": "2014-03-15T15:59:21",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "3179d778dc9e3f2883d5f7ffa63b9ae0399c16bc",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "elasticsearch_dsl/query.py",
+                "elasticsearch_dsl/utils.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 9, "insertions": 5, "lines": 14, "files": 3},
+            "description": "Centralize == of Dsl objects",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["b5e7d0c4b284211df8f7b464fcece93a27a802fb"],
+            "committed_date": "2014-03-10T21:37:24",
+            "authored_date": "2014-03-10T21:37:24",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "b5e7d0c4b284211df8f7b464fcece93a27a802fb",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+                "elasticsearch_dsl/utils.py",
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_aggs.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 75, "insertions": 115, "lines": 190, "files": 6},
+            "description": "Experimental draft with more declarative DSL",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["0fe741b43adee5ca1424584ddd3f35fa33f8733c"],
+            "committed_date": "2014-03-10T21:34:39",
+            "authored_date": "2014-03-10T21:34:39",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "0fe741b43adee5ca1424584ddd3f35fa33f8733c",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["test_elasticsearch_dsl/test_search.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 2, "insertions": 2, "lines": 4, "files": 1},
+            "description": "Make sure .query is chainable",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["a22be5933d4b022cbacee867b1aece120208edf3"],
+            "committed_date": "2014-03-07T17:41:59",
+            "authored_date": "2014-03-07T17:41:59",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "a22be5933d4b022cbacee867b1aece120208edf3",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 14, "insertions": 44, "lines": 58, "files": 3},
+            "description": "Search now does aggregations",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["e823686aacfc4bdcb34ffdab337a26fa09659a9a"],
+            "committed_date": "2014-03-07T17:29:55",
+            "authored_date": "2014-03-07T17:29:55",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "e823686aacfc4bdcb34ffdab337a26fa09659a9a",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [".gitignore"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 1, "lines": 1, "files": 1},
+            "description": "Ignore html coverage report",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["e0aedb3011c71d704deec03a8f32b2b360d6e364"],
+            "committed_date": "2014-03-07T17:03:23",
+            "authored_date": "2014-03-07T17:03:23",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "e0aedb3011c71d704deec03a8f32b2b360d6e364",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/aggs.py",
+                "test_elasticsearch_dsl/test_aggs.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 228, "lines": 228, "files": 2},
+            "description": "Added aggregation DSL objects",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["61cbc0aa62a0b776ae5e333406659dbb2f5cfbbd"],
+            "committed_date": "2014-03-07T16:25:55",
+            "authored_date": "2014-03-07T16:25:55",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "61cbc0aa62a0b776ae5e333406659dbb2f5cfbbd",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 12, "insertions": 7, "lines": 19, "files": 2},
+            "description": "Only retrieve DslClass, leave the instantiation to the caller",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["647f1017a7b17a913e07af70a3b03202f6adbdfd"],
+            "committed_date": "2014-03-07T15:27:43",
+            "authored_date": "2014-03-07T15:27:43",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "647f1017a7b17a913e07af70a3b03202f6adbdfd",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "test_elasticsearch_dsl/test_search.py",
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 19, "insertions": 19, "lines": 38, "files": 3},
+            "description": "No need to replicate Query suffix when in query namespace",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["7c4f94ecdb38f0e91c7ee52f579c0ea148afcc7d"],
+            "committed_date": "2014-03-07T15:19:01",
+            "authored_date": "2014-03-07T15:19:01",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "7c4f94ecdb38f0e91c7ee52f579c0ea148afcc7d",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["elasticsearch_dsl/utils.py"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 2, "insertions": 3, "lines": 5, "files": 1},
+            "description": "Ask forgiveness, not permission",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["c10793c2ca43688195e415b25b674ff34d58eaff"],
+            "committed_date": "2014-03-07T15:13:22",
+            "authored_date": "2014-03-07T15:13:22",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "c10793c2ca43688195e415b25b674ff34d58eaff",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/utils.py",
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 24, "insertions": 27, "lines": 51, "files": 3},
+            "description": "Extract DSL object registration to DslMeta",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["d8867fdb17fcf4c696657740fa08d29c36adc6ec"],
+            "committed_date": "2014-03-07T15:12:13",
+            "authored_date": "2014-03-07T15:10:31",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "d8867fdb17fcf4c696657740fa08d29c36adc6ec",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 13, "lines": 13, "files": 2},
+            "description": "Search.to_dict",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["2eb7cd980d917ed6f4a4dd8e246804f710ec5082"],
+            "committed_date": "2014-03-07T02:58:33",
+            "authored_date": "2014-03-07T02:58:33",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "2eb7cd980d917ed6f4a4dd8e246804f710ec5082",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/search.py",
+                "test_elasticsearch_dsl/test_search.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 113, "lines": 113, "files": 2},
+            "description": "Basic Search object",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["11708576f9118e0dbf27ae1f8a7b799cf281b511"],
+            "committed_date": "2014-03-06T21:02:03",
+            "authored_date": "2014-03-06T21:01:05",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "11708576f9118e0dbf27ae1f8a7b799cf281b511",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 13, "lines": 13, "files": 2},
+            "description": "MatchAll query + anything is anything",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["1dc496e5c7c1b2caf290df477fca2db61ebe37e0"],
+            "committed_date": "2014-03-06T20:40:39",
+            "authored_date": "2014-03-06T20:39:52",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "1dc496e5c7c1b2caf290df477fca2db61ebe37e0",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 53, "lines": 53, "files": 2},
+            "description": "From_dict, Q(dict) and bool query parses it's subqueries",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["d407f99d1959b7b862a541c066d9fd737ce913f3"],
+            "committed_date": "2014-03-06T20:24:30",
+            "authored_date": "2014-03-06T20:24:30",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "d407f99d1959b7b862a541c066d9fd737ce913f3",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": ["CONTRIBUTING.md", "README.rst"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 6, "insertions": 21, "lines": 27, "files": 2},
+            "description": "Housekeeping - licence and updated generic CONTRIBUTING.md",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["277e8ecc7395754d1ba1f2411ec32337a3e9d73f"],
+            "committed_date": "2014-03-05T16:21:44",
+            "authored_date": "2014-03-05T16:21:44",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "277e8ecc7395754d1ba1f2411ec32337a3e9d73f",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "setup.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 59, "lines": 59, "files": 3},
+            "description": "Automatic query registration and Q function",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["8f1e34bd8f462fec50bcc10971df2d57e2986604"],
+            "committed_date": "2014-03-05T16:18:52",
+            "authored_date": "2014-03-05T16:18:52",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "8f1e34bd8f462fec50bcc10971df2d57e2986604",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/query.py",
+                "test_elasticsearch_dsl/test_query.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 54, "lines": 54, "files": 2},
+            "description": "Initial implementation of match and bool queries",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["fcff47ddcc6d08be5739d03dd30f504fb9db2608"],
+            "committed_date": "2014-03-05T15:55:06",
+            "authored_date": "2014-03-05T15:55:06",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "fcff47ddcc6d08be5739d03dd30f504fb9db2608",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "docs/Makefile",
+                "CONTRIBUTING.md",
+                "docs/conf.py",
+                "LICENSE",
+                "Changelog.rst",
+                "docs/index.rst",
+                "docs/Changelog.rst",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 692, "lines": 692, "files": 7},
+            "description": "Docs template",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["febe8127ae48fcc81778c0fb2d628f1bcc0a0350"],
+            "committed_date": "2014-03-04T01:42:31",
+            "authored_date": "2014-03-04T01:42:31",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "febe8127ae48fcc81778c0fb2d628f1bcc0a0350",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [
+                "elasticsearch_dsl/__init__.py",
+                "test_elasticsearch_dsl/run_tests.py",
+                "setup.py",
+                "README.rst",
+                "test_elasticsearch_dsl/__init__.py",
+            ],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 82, "lines": 82, "files": 5},
+            "description": "Empty project structure",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": ["2a8f1ce89760bfc72808f3945b539eae650acac9"],
+            "committed_date": "2014-03-04T01:37:49",
+            "authored_date": "2014-03-03T18:23:55",
+        },
+        "_index": "git",
+    },
+    {
+        "_id": "2a8f1ce89760bfc72808f3945b539eae650acac9",
+        "routing": "elasticsearch-dsl-py",
+        "_source": {
+            "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"},
+            "files": [".gitignore"],
+            "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "stats": {"deletions": 0, "insertions": 9, "lines": 9, "files": 1},
+            "description": "Initial commit, .gitignore",
+            "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"},
+            "parent_shas": [],
+            "committed_date": "2014-03-03T18:15:05",
+            "authored_date": "2014-03-03T18:15:05",
+        },
+        "_index": "git",
+    },
+]
+
+
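+# The flat-git index holds the same commits without the parent/child join:
+# flatten_doc drops the commit_repo join field and the routing and re-targets
+# each document at "flat-git".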
+def flatten_doc(d: Dict[str, Any]) -> Dict[str, Any]:
+    src = d["_source"].copy()
+    del src["commit_repo"]
+    return {"_index": "flat-git", "_id": d["_id"], "_source": src}
+
+
+FLAT_DATA = [flatten_doc(d) for d in DATA if "routing" in d]
+
+
+def create_test_git_data(d: Dict[str, Any]) -> Dict[str, Any]:
+    src = d["_source"].copy()
+    return {
+        "_index": "test-git",
+        "routing": "elasticsearch-dsl-py",
+        "_id": d["_id"],
+        "_source": src,
+    }
+
+
+TEST_GIT_DATA = [create_test_git_data(d) for d in DATA]
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,74 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+
+from ..async_examples import alias_migration
+from ..async_examples.alias_migration import ALIAS, PATTERN, BlogPost, migrate
+
+
+@pytest.mark.anyio
+async def test_alias_migration(async_write_client: AsyncElasticsearch) -> None:
+    # create the index
+    await alias_migration.setup()
+
+    # verify that the template, index, and alias have been set up
+    assert await async_write_client.indices.exists_index_template(name=ALIAS)
+    assert await async_write_client.indices.exists(index=PATTERN)
+    assert await async_write_client.indices.exists_alias(name=ALIAS)
+
+    indices = await async_write_client.indices.get(index=PATTERN)
+    assert len(indices) == 1
+    index_name, _ = indices.popitem()
+
+    # which means we can now save a document
+    with open(__file__) as f:
+        bp = BlogPost(
+            _id=0,
+            title="Hello World!",
+            tags=["testing", "dummy"],
+            content=f.read(),
+            published=None,
+        )
+        await bp.save(refresh=True)
+
+    assert await BlogPost.search().count() == 1
+
+    # _matches works, which means we get a BlogPost instance
+    bp = (await BlogPost.search().execute())[0]
+    assert isinstance(bp, BlogPost)
+    assert not bp.is_published()
+    assert "0" == bp.meta.id
+
+    # create new index
+    await migrate()
+
+    indices = await async_write_client.indices.get(index=PATTERN)
+    assert 2 == len(indices)
+    alias = await async_write_client.indices.get(index=ALIAS)
+    assert 1 == len(alias)
+    assert index_name not in alias
+
+    # data has been moved properly
+    assert await BlogPost.search().count() == 1
+
+    # _matches works, which means we get a BlogPost instance
+    bp = (await BlogPost.search().execute())[0]
+    assert isinstance(bp, BlogPost)
+    assert "0" == bp.meta.id
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,40 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+
+from ..async_examples.completion import Person
+
+
+@pytest.mark.anyio
+async def test_person_suggests_on_all_variants_of_name(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    await Person.init(using=async_write_client)
+
+    await Person(_id=None, name="Honza Král", popularity=42).save(refresh=True)
+
+    s = Person.search().suggest("t", "kra", completion={"field": "suggest"})
+    response = await s.execute()
+
+    opts = response.suggest["t"][0].options
+
+    assert 1 == len(opts)
+    assert opts[0]._score == 42
+    assert opts[0]._source.name == "Honza Král"
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,57 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import A, AsyncSearch
+
+from ..async_examples.composite_agg import scan_aggs
+
+
+@pytest.mark.anyio
+async def test_scan_aggs_exhausts_all_files(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = AsyncSearch(index="flat-git")
+    key_aggs = [{"files": A("terms", field="files")}]
+    file_list = [f async for f in scan_aggs(s, key_aggs)]
+
+    assert len(file_list) == 26
+
+
+@pytest.mark.anyio
+async def test_scan_aggs_with_multiple_aggs(
+    async_data_client: AsyncElasticsearch,
+) -> None:
+    s = AsyncSearch(index="flat-git")
+    key_aggs = [
+        {"files": A("terms", field="files")},
+        {
+            "months": A(
+                "date_histogram", field="committed_date", calendar_interval="month"
+            )
+        },
+    ]
+    file_list = [
+        f
+        async for f in scan_aggs(
+            s, key_aggs, {"first_seen": A("min", field="committed_date")}
+        )
+    ]
+
+    assert len(file_list) == 47
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,115 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.dsl import Q
+
+from ..async_examples.parent_child import Answer, Comment, Question, User, setup
+
+honza = User(
+    id=42,
+    signed_up=datetime(2013, 4, 3),
+    username="honzakral",
+    email="honza@elastic.co",
+    location="Prague",
+)
+
+nick = User(
+    id=47,
+    signed_up=datetime(2017, 4, 3),
+    username="fxdgear",
+    email="nick.lang@elastic.co",
+    location="Colorado",
+)
+
+
+@pytest.fixture
+async def question(async_write_client: AsyncElasticsearch) -> Question:
+    await setup()
+    assert await async_write_client.indices.exists_index_template(name="base")
+
+    # create a question object
+    q = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+        created=None,
+        question_answer=None,
+        comments=[],
+    )
+    await q.save()
+    return q
+
+
+@pytest.mark.anyio
+async def test_comment(
+    async_write_client: AsyncElasticsearch, question: Question
+) -> None:
+    await question.add_comment(nick, "Just use elasticsearch-py")
+
+    q = await Question.get(1)  # type: ignore[arg-type]
+    assert isinstance(q, Question)
+    assert 1 == len(q.comments)
+
+    c = q.comments[0]
+    assert isinstance(c, Comment)
+    assert c.author.username == "fxdgear"
+
+
+@pytest.mark.anyio
+async def test_question_answer(
+    async_write_client: AsyncElasticsearch, question: Question
+) -> None:
+    a = await question.add_answer(honza, "Just use `elasticsearch-py`!")
+
+    assert isinstance(a, Answer)
+
+    # refresh the index so we can search right away
+    await Question._index.refresh()
+
+    # we can now fetch answers from elasticsearch
+    answers = await question.get_answers()
+    assert 1 == len(answers)
+    assert isinstance(answers[0], Answer)
+
+    search = Question.search().query(
+        "has_child",
+        type="answer",
+        inner_hits={},
+        query=Q("term", author__username__keyword="honzakral"),
+    )
+    response = await search.execute()
+
+    assert 1 == len(response.hits)
+
+    q = response.hits[0]
+    assert isinstance(q, Question)
+    assert 1 == len(q.meta.inner_hits.answer.hits)
+    assert q.meta.inner_hits.answer.hits is await q.get_answers()
+
+    a = q.meta.inner_hits.answer.hits[0]
+    assert isinstance(a, Answer)
+    assert isinstance(await a.get_question(), Question)
+    assert (await a.get_question()).meta.id == "1"
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,38 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+
+from ..async_examples.percolate import BlogPost, setup
+
+
+@pytest.mark.anyio
+async def test_post_gets_tagged_automatically(
+    async_write_client: AsyncElasticsearch,
+) -> None:
+    await setup()
+
+    bp = BlogPost(_id=47, content="nothing about snakes here!")
+    bp_py = BlogPost(_id=42, content="something about Python here!")
+
+    await bp.save()
+    await bp_py.save()
+
+    assert [] == bp.tags
+    assert {"programming", "development", "python"} == set(bp_py.tags)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,65 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import sys
+from hashlib import md5
+from typing import Any, List, Tuple
+from unittest import SkipTest
+from unittest.mock import Mock, patch
+
+import pytest
+
+from elasticsearch import AsyncElasticsearch
+
+
+@pytest.mark.anyio
+async def test_vector_search(
+    async_write_client: AsyncElasticsearch, es_version: Tuple[int, ...]
+) -> None:
+    # this test only runs on Elasticsearch >= 8.11 because the example uses
+    # a dense vector without specifying an explicit size
+    if es_version < (8, 11):
+        raise SkipTest("This test requires Elasticsearch 8.11 or newer")
+
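+    # Stand-in for sentence_transformers.SentenceTransformer: encode() derives
+    # a deterministic 16-dimensional pseudo-embedding from the MD5 digest of
+    # the text, which is enough for the example's vector search to return a
+    # stable result without downloading a real model.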
+    class MockSentenceTransformer:
+        def __init__(self, model: Any):
+            pass
+
+        def encode(self, text: str) -> List[float]:
+            vector = [int(ch) for ch in md5(text.encode()).digest()]
+            total = sum(vector)
+            return [float(v) / total for v in vector]
+
+    def mock_nltk_tokenize(content: str):
+        return content.split("\n")
+
+    # mock sentence_transformers and nltk, because they are quite big and
+    # irrelevant for testing the example logic
+    with patch.dict(
+        sys.modules,
+        {
+            "sentence_transformers": Mock(SentenceTransformer=MockSentenceTransformer),
+            "nltk": Mock(sent_tokenize=mock_nltk_tokenize),
+        },
+    ):
+        # import the example after the dependencies are mocked
+        from ..async_examples import vectors
+
+        await vectors.create()
+        await vectors.WorkplaceDoc._index.refresh()
+        results = await (await vectors.search("Welcome to our team!")).execute()
+        assert results[0].name == "Intellectual Property Policy"
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,16 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,74 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+
+from ..examples import alias_migration
+from ..examples.alias_migration import ALIAS, PATTERN, BlogPost, migrate
+
+
+@pytest.mark.sync
+def test_alias_migration(write_client: Elasticsearch) -> None:
+    # create the index
+    alias_migration.setup()
+
+    # verify that the template, index, and alias have been set up
+    assert write_client.indices.exists_index_template(name=ALIAS)
+    assert write_client.indices.exists(index=PATTERN)
+    assert write_client.indices.exists_alias(name=ALIAS)
+
+    indices = write_client.indices.get(index=PATTERN)
+    assert len(indices) == 1
+    index_name, _ = indices.popitem()
+
+    # which means we can now save a document
+    with open(__file__) as f:
+        bp = BlogPost(
+            _id=0,
+            title="Hello World!",
+            tags=["testing", "dummy"],
+            content=f.read(),
+            published=None,
+        )
+        bp.save(refresh=True)
+
+    assert BlogPost.search().count() == 1
+
+    # _matches works, which means we get a BlogPost instance
+    bp = (BlogPost.search().execute())[0]
+    assert isinstance(bp, BlogPost)
+    assert not bp.is_published()
+    assert "0" == bp.meta.id
+
+    # create new index
+    migrate()
+
+    indices = write_client.indices.get(index=PATTERN)
+    assert 2 == len(indices)
+    alias = write_client.indices.get(index=ALIAS)
+    assert 1 == len(alias)
+    assert index_name not in alias
+
+    # data has been moved properly
+    assert BlogPost.search().count() == 1
+
+    # _matches works, which means we get a BlogPost instance
+    bp = (BlogPost.search().execute())[0]
+    assert isinstance(bp, BlogPost)
+    assert "0" == bp.meta.id
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,40 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+
+from ..examples.completion import Person
+
+
+@pytest.mark.sync
+def test_person_suggests_on_all_variants_of_name(
+    write_client: Elasticsearch,
+) -> None:
+    Person.init(using=write_client)
+
+    Person(_id=None, name="Honza Král", popularity=42).save(refresh=True)
+
+    s = Person.search().suggest("t", "kra", completion={"field": "suggest"})
+    response = s.execute()
+
+    opts = response.suggest["t"][0].options
+
+    assert 1 == len(opts)
+    assert opts[0]._score == 42
+    assert opts[0]._source.name == "Honza Král"
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,57 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import A, Search
+
+from ..examples.composite_agg import scan_aggs
+
+
+@pytest.mark.sync
+def test_scan_aggs_exhausts_all_files(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="flat-git")
+    key_aggs = [{"files": A("terms", field="files")}]
+    file_list = [f for f in scan_aggs(s, key_aggs)]
+
+    assert len(file_list) == 26
+
+
+@pytest.mark.sync
+def test_scan_aggs_with_multiple_aggs(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="flat-git")
+    key_aggs = [
+        {"files": A("terms", field="files")},
+        {
+            "months": A(
+                "date_histogram", field="committed_date", calendar_interval="month"
+            )
+        },
+    ]
+    file_list = [
+        f
+        for f in scan_aggs(
+            s, key_aggs, {"first_seen": A("min", field="committed_date")}
+        )
+    ]
+
+    assert len(file_list) == 47
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,111 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+
+import pytest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.dsl import Q
+
+from ..examples.parent_child import Answer, Comment, Question, User, setup
+
+honza = User(
+    id=42,
+    signed_up=datetime(2013, 4, 3),
+    username="honzakral",
+    email="honza@elastic.co",
+    location="Prague",
+)
+
+nick = User(
+    id=47,
+    signed_up=datetime(2017, 4, 3),
+    username="fxdgear",
+    email="nick.lang@elastic.co",
+    location="Colorado",
+)
+
+
+@pytest.fixture
+def question(write_client: Elasticsearch) -> Question:
+    setup()
+    assert write_client.indices.exists_index_template(name="base")
+
+    # create a question object
+    q = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+        created=None,
+        question_answer=None,
+        comments=[],
+    )
+    q.save()
+    return q
+
+
+@pytest.mark.sync
+def test_comment(write_client: Elasticsearch, question: Question) -> None:
+    question.add_comment(nick, "Just use elasticsearch-py")
+
+    q = Question.get(1)  # type: ignore[arg-type]
+    assert isinstance(q, Question)
+    assert 1 == len(q.comments)
+
+    c = q.comments[0]
+    assert isinstance(c, Comment)
+    assert c.author.username == "fxdgear"
+
+
+@pytest.mark.sync
+def test_question_answer(write_client: Elasticsearch, question: Question) -> None:
+    a = question.add_answer(honza, "Just use `elasticsearch-py`!")
+
+    assert isinstance(a, Answer)
+
+    # refresh the index so we can search right away
+    Question._index.refresh()
+
+    # we can now fetch answers from elasticsearch
+    answers = question.get_answers()
+    assert 1 == len(answers)
+    assert isinstance(answers[0], Answer)
+
+    search = Question.search().query(
+        "has_child",
+        type="answer",
+        inner_hits={},
+        query=Q("term", author__username__keyword="honzakral"),
+    )
+    response = search.execute()
+
+    assert 1 == len(response.hits)
+
+    q = response.hits[0]
+    assert isinstance(q, Question)
+    assert 1 == len(q.meta.inner_hits.answer.hits)
+    assert q.meta.inner_hits.answer.hits is q.get_answers()
+
+    a = q.meta.inner_hits.answer.hits[0]
+    assert isinstance(a, Answer)
+    assert isinstance(a.get_question(), Question)
+    assert (a.get_question()).meta.id == "1"
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,38 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pytest
+
+from elasticsearch import Elasticsearch
+
+from ..examples.percolate import BlogPost, setup
+
+
+@pytest.mark.sync
+def test_post_gets_tagged_automatically(
+    write_client: Elasticsearch,
+) -> None:
+    setup()
+
+    bp = BlogPost(_id=47, content="nothing about snakes here!")
+    bp_py = BlogPost(_id=42, content="something about Python here!")
+
+    bp.save()
+    bp_py.save()
+
+    assert [] == bp.tags
+    assert {"programming", "development", "python"} == set(bp_py.tags)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,65 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import sys
+from hashlib import md5
+from typing import Any, List, Tuple
+from unittest import SkipTest
+from unittest.mock import Mock, patch
+
+import pytest
+
+from elasticsearch import Elasticsearch
+
+
+@pytest.mark.sync
+def test_vector_search(
+    write_client: Elasticsearch, es_version: Tuple[int, ...]
+) -> None:
+    # this test only runs on Elasticsearch >= 8.11 because the example uses
+    # a dense vector without specifying an explicit size
+    if es_version < (8, 11):
+        raise SkipTest("This test requires Elasticsearch 8.11 or newer")
+
+    class MockSentenceTransformer:
+        def __init__(self, model: Any):
+            pass
+
+        def encode(self, text: str) -> List[float]:
+            vector = [int(ch) for ch in md5(text.encode()).digest()]
+            total = sum(vector)
+            return [float(v) / total for v in vector]
+
+    def mock_nltk_tokenize(content: str):
+        return content.split("\n")
+
+    # mock sentence_transformers and nltk, because they are quite big and
+    # irrelevant for testing the example logic
+    with patch.dict(
+        sys.modules,
+        {
+            "sentence_transformers": Mock(SentenceTransformer=MockSentenceTransformer),
+            "nltk": Mock(sent_tokenize=mock_nltk_tokenize),
+        },
+    ):
+        # import the example after the dependencies are mocked
+        from ..examples import vectors
+
+        vectors.create()
+        vectors.WorkplaceDoc._index.refresh()
+        results = (vectors.search("Welcome to our team!")).execute()
+        assert results[0].name == "Intellectual Property Policy"
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/alias_migration.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/alias_migration.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,162 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Simple example with a single Document demonstrating how schema can be managed,
+including upgrading with reindexing.
+
+Key concepts:
+
+    * setup() function to first initialize the schema (as index template) in
+      elasticsearch. Can be called any time (recommended with every deploy of
+      your app).
+
+    * migrate() function to be called any time the schema changes - it will
+      create a new index (with a new timestamp-based name) and update the
+      alias. By default it will also (before flipping the alias) move the data
+      from the previous index to the new one.
+
+    * BlogPost._matches() class method is required for this code to work: without
+      it, BlogPost would not be used to deserialize the documents, because their
+      _index points at the concrete index name while the class refers to the alias.
+"""
+import asyncio
+import os
+from datetime import datetime
+from fnmatch import fnmatch
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from elasticsearch.dsl import AsyncDocument, Keyword, async_connections, mapped_field
+
+ALIAS = "test-blog"
+PATTERN = ALIAS + "-*"
+PRIORITY = 100
+
+
+class BlogPost(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    title: str
+    tags: List[str] = mapped_field(Keyword())
+    content: str
+    published: Optional[datetime] = mapped_field(default=None)
+
+    def is_published(self) -> bool:
+        return bool(self.published and datetime.now() > self.published)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # override _matches to match indices in a pattern instead of just ALIAS
+        # hit is the raw dict as returned by elasticsearch
+        return fnmatch(hit["_index"], PATTERN)
+
+    class Index:
+        # we will use an alias instead of the index
+        name = ALIAS
+        # set settings and possibly other attributes of the index like
+        # analyzers
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    """
+    Create the index template in elasticsearch specifying the mappings and any
+    settings to be used. This can be run at any time, ideally at every new code
+    deploy.
+    """
+    # create an index template
+    index_template = BlogPost._index.as_composable_template(
+        ALIAS, PATTERN, priority=PRIORITY
+    )
+    # upload the template into elasticsearch
+    # potentially overriding the one already there
+    await index_template.save()
+
+    # create the first index if it doesn't exist
+    if not await BlogPost._index.exists():
+        await migrate(move_data=False)
+
+
+async def migrate(move_data: bool = True, update_alias: bool = True) -> None:
+    """
+    Upgrade function that creates a new index for the data. By default it will
+    also reindex the previous copy of the data into the new index (specify
+    ``move_data=False`` to skip this step) and update the alias to point to
+    the latest index (set ``update_alias=False`` to skip).
+
+    Note that while this function is running the application can still perform
+    any and all searches without any loss of functionality. It should, however,
+    not perform any writes at this time as those might be lost.
+    """
+    # construct a new index name by appending current timestamp
+    next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f"))
+
+    # get the low level connection
+    es = async_connections.get_connection()
+
+    # create new index, it will use the settings from the template
+    await es.indices.create(index=next_index)
+
+    if move_data:
+        # move data from current alias to the new index
+        await es.options(request_timeout=3600).reindex(
+            body={"source": {"index": ALIAS}, "dest": {"index": next_index}}
+        )
+        # refresh the index to make the changes visible
+        await es.indices.refresh(index=next_index)
+
+    if update_alias:
+        # repoint the alias to point to the newly created index
+        await es.indices.update_aliases(
+            body={
+                "actions": [
+                    {"remove": {"alias": ALIAS, "index": PATTERN}},
+                    {"add": {"alias": ALIAS, "index": next_index}},
+                ]
+            }
+        )
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await setup()
+
+    # create a new document
+    bp = BlogPost(
+        _id=0,
+        title="Hello World!",
+        tags=["testing", "dummy"],
+        content=open(__file__).read(),
+    )
+    await bp.save(refresh=True)
+
+    # create new index
+    await migrate()
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/completion.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/completion.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,114 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with completion suggester.
+
+In the ``Person`` class we index the person's name to allow auto completing in
+any order ("first last", "middle last first", ...). For the weight we use a
+value from the ``popularity`` field which is a long.
+
+To make the suggestions work in different languages we added a custom analyzer
+that does ascii folding.
+"""
+
+import asyncio
+import os
+from itertools import permutations
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    Completion,
+    Keyword,
+    Long,
+    Text,
+    analyzer,
+    async_connections,
+    mapped_field,
+    token_filter,
+)
+
+# custom analyzer for names
+ascii_fold = analyzer(
+    "ascii_fold",
+    # we don't want to split O'Brian or Toulouse-Lautrec
+    tokenizer="whitespace",
+    filter=["lowercase", token_filter("ascii_fold", "asciifolding")],
+)
+
+
+class Person(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+    popularity: int = mapped_field(Long(), default=0)
+
+    # completion field with a custom analyzer
+    suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False)
+
+    def clean(self) -> None:
+        """
+        Automatically construct the suggestion input and weight by taking all
+        possible permutations of Person's name as ``input`` and taking their
+        popularity as ``weight``.
+        """
+        self.suggest = {
+            "input": [" ".join(p) for p in permutations(self.name.split())],
+            "weight": self.popularity,
+        }
+
+    class Index:
+        name = "test-suggest"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await Person.init()
+
+    # index some sample data
+    for id, (name, popularity) in enumerate(
+        [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)]
+    ):
+        await Person(_id=id, name=name, popularity=popularity).save()
+
+    # refresh index manually to make changes live
+    await Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Jara Cimr", "tou", "de hen"):
+        s = Person.search()
+        s = s.suggest("auto_complete", text, completion={"field": "suggest"})
+        response = await s.execute()
+
+        # print out all the options we got
+        for option in response.suggest["auto_complete"][0].options:
+            print("%10s: %25s (%d)" % (text, option._source.name, option._score))
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/composite_agg.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/composite_agg.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/composite_agg.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/composite_agg.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,93 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import asyncio
+import os
+from typing import Any, AsyncIterator, Dict, Mapping, Sequence, cast
+
+from elasticsearch.dsl import Agg, AsyncSearch, Response, aggs, async_connections
+from elasticsearch.dsl.types import CompositeAggregate
+from elasticsearch.helpers import async_bulk
+from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX
+
+
+async def scan_aggs(
+    search: AsyncSearch,
+    source_aggs: Sequence[Mapping[str, Agg]],
+    inner_aggs: Dict[str, Agg] = {},
+    size: int = 10,
+) -> AsyncIterator[CompositeAggregate]:
+    """
+    Helper function used to iterate over all possible bucket combinations of
+    ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
+    ``composite`` aggregation under the hood to perform this.
+    """
+
+    async def run_search(**kwargs: Any) -> Response:
+        s = search[:0]
+        bucket = s.aggs.bucket(
+            "comp",
+            aggs.Composite(
+                sources=source_aggs,
+                size=size,
+                **kwargs,
+            ),
+        )
+        for agg_name, agg in inner_aggs.items():
+            bucket[agg_name] = agg
+        return await s.execute()
+
+    response = await run_search()
+    while response.aggregations["comp"].buckets:
+        for b in response.aggregations["comp"].buckets:
+            yield cast(CompositeAggregate, b)
+        if "after_key" in response.aggregations["comp"]:
+            after = response.aggregations["comp"].after_key
+        else:
+            after = response.aggregations["comp"].buckets[-1].key
+        response = await run_search(after=after)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    client = async_connections.create_connection(
+        hosts=[os.environ["ELASTICSEARCH_URL"]]
+    )
+
+    # create the index and populate it with some data
+    # note that the dataset is imported from the library's test suite
+    await client.indices.delete(index="git", ignore_unavailable=True)
+    await client.indices.create(index="git", **GIT_INDEX)
+    await async_bulk(client, DATA, raise_on_error=True, refresh=True)
+
+    # run some aggregations on the data
+    async for b in scan_aggs(
+        AsyncSearch(index="git"),
+        [{"files": aggs.Terms(field="files")}],
+        {"first_seen": aggs.Min(field="committed_date")},
+    ):
+        print(
+            "File %s has been modified %d times, first seen at %s."
+            % (b.key.files, b.doc_count, b.first_seen.value_as_string)
+        )
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/esql_employees.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/esql_employees.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/esql_employees.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/esql_employees.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,170 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# ES|QL query builder example
+
+Requirements:
+
+$ pip install "elasticsearch[async]" faker
+
+To run the example:
+
+$ python esql_employees.py "name to search"
+
+The index will be created automatically with a list of 1000 randomly generated
+employees if it does not exist. Add `--recreate-index` or `-r` to the command
+to regenerate it.
+
+Examples:
+
+$ python esql_employees "Mark"  # employees named Mark (first or last names)
+$ python esql_employees "Sarah" --limit 10  # up to 10 employees named Sarah
+$ python esql_employees "Sam" --sort height  # sort results by height
+$ python esql_employees "Sam" --sort name  # sort results by last name
+"""
+
+import argparse
+import asyncio
+import os
+import random
+
+from faker import Faker
+
+from elasticsearch.dsl import AsyncDocument, InnerDoc, M, async_connections
+from elasticsearch.esql import ESQLBase
+from elasticsearch.esql.functions import concat, multi_match
+
+fake = Faker()
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+    zip_code: M[str]
+
+
+class Employee(AsyncDocument):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+    @property
+    def name(self) -> str:
+        return f"{self.first_name} {self.last_name}"
+
+    def __repr__(self) -> str:
+        return f"<Employee[{self.meta.id}]: {self.first_name} {self.last_name}>"
+
+
+async def create(num_employees: int = 1000) -> None:
+    print("Creating a new employee index...")
+    if await Employee._index.exists():
+        await Employee._index.delete()
+    await Employee.init()
+
+    for i in range(num_employees):
+        address = Address(
+            address=fake.address(), city=fake.city(), zip_code=fake.zipcode()
+        )
+        emp = Employee(
+            emp_no=10000 + i,
+            first_name=fake.first_name(),
+            last_name=fake.last_name(),
+            height=int((random.random() * 0.8 + 1.5) * 1000) / 1000,
+            still_hired=random.random() >= 0.5,
+            address=address,
+        )
+        await emp.save()
+    await Employee._index.refresh()
+
+
+async def search(query: str, limit: int, sort: str) -> None:
+    q: ESQLBase = (
+        Employee.esql_from()
+        .where(multi_match(query, Employee.first_name, Employee.last_name))
+        .eval(full_name=concat(Employee.first_name, " ", Employee.last_name))
+    )
+    if sort == "height":
+        q = q.sort(Employee.height.desc())
+    elif sort == "name":
+        q = q.sort(Employee.last_name.asc())
+    q = q.limit(limit)
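+    # with return_additional=True each result is a (document, extra columns)
+    # tuple; the extra columns hold computed values such as "full_name"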
+    async for result in Employee.esql_execute(q, return_additional=True):
+        assert type(result) == tuple
+        employee = result[0]
+        full_name = result[1]["full_name"]
+        print(
+            f"{full_name:<20}",
+            f"{'Hired' if employee.still_hired else 'Not hired':<10}",
+            f"{employee.height:5.2f}m",
+            f"{employee.address.city:<20}",
+        )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Employee ES|QL example")
+    parser.add_argument(
+        "--recreate-index",
+        "-r",
+        action="store_true",
+        help="Recreate and populate the index",
+    )
+    parser.add_argument(
+        "--limit",
+        action="store",
+        type=int,
+        default=100,
+        help="Maximum number or employees to return (default: 100)",
+    )
+    parser.add_argument(
+        "--sort",
+        action="store",
+        type=str,
+        default=None,
+        help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)',
+    )
+    parser.add_argument(
+        "query", action="store", help="The name or partial name to search for"
+    )
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await Employee._index.exists():
+        await create()
+    await Employee.init()
+
+    await search(args.query, args.limit, args.sort)
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/parent_child.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/parent_child.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,276 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Complex data model example modeling stackoverflow-like data.
+
+It is used to showcase several key features of elasticsearch-dsl:
+
+    * Object and Nested fields: see the User and Comment classes and the fields
+      they are used in
+
+        * method add_comment is used to add comments
+
+    * Parent/Child relationship
+
+        * See the Join field on Post creating the relationship between Question
+          and Answer
+
+        * Meta.matches allows the hits from the same index to be wrapped in the
+          proper classes
+
+        * to see how child objects are created see Question.add_answer
+
+        * Question.search_answers shows how to query for children of a
+          particular parent
+
+"""
+import asyncio
+import os
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    Date,
+    InnerDoc,
+    Join,
+    Keyword,
+    Long,
+    Text,
+    async_connections,
+    mapped_field,
+)
+
+
+class User(InnerDoc):
+    """
+    Class used to represent a denormalized user stored on other objects.
+    """
+
+    id: int = mapped_field(Long())
+    signed_up: Optional[datetime] = mapped_field(Date())
+    username: str = mapped_field(Text(fields={"keyword": Keyword()}))
+    email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+    location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+
+
+class Comment(InnerDoc):
+    """
+    Class wrapper for nested comment objects.
+    """
+
+    author: User
+    created: datetime
+    content: str
+
+
+class Post(AsyncDocument):
+    """
+    Base class for Question and Answer containing the common fields.
+    """
+
+    author: User
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _routing: str = mapped_field(default=None)
+        _id: Optional[int] = mapped_field(default=None)
+
+    created: Optional[datetime] = mapped_field(default=None)
+    body: str = mapped_field(default="")
+    comments: List[Comment] = mapped_field(default_factory=list)
+    question_answer: Any = mapped_field(
+        Join(relations={"question": "answer"}), default_factory=dict
+    )
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # Post is an abstract class, make sure it never gets used for
+        # deserialization
+        return False
+
+    class Index:
+        name = "test-qa-site"
+        settings = {
+            "number_of_shards": 1,
+            "number_of_replicas": 0,
+        }
+
+    async def add_comment(
+        self,
+        user: User,
+        content: str,
+        created: Optional[datetime] = None,
+        commit: Optional[bool] = True,
+    ) -> Comment:
+        c = Comment(author=user, content=content, created=created or datetime.now())
+        self.comments.append(c)
+        if commit:
+            await self.save()
+        return c
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # if there is no date, use now
+        if self.created is None:
+            self.created = datetime.now()
+        await super().save(**kwargs)
+
+
+class Question(Post):
+    tags: List[str] = mapped_field(
+        default_factory=list
+    )  # .tags will return empty list if not present
+    title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Question class for parent documents"""
+        return bool(hit["_source"]["question_answer"] == "question")
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> AsyncSearch:  # type: ignore[override]
+        return cls._index.search(**kwargs).filter("term", question_answer="question")
+
+    async def add_answer(
+        self,
+        user: User,
+        body: str,
+        created: Optional[datetime] = None,
+        accepted: bool = False,
+        commit: Optional[bool] = True,
+    ) -> "Answer":
+        answer = Answer(
+            # required to make sure the answer is stored in the same shard
+            _routing=self.meta.id,
+            # set up the parent/child mapping
+            question_answer={"name": "answer", "parent": self.meta.id},
+            # pass in the field values
+            author=user,
+            created=created,
+            body=body,
+            is_accepted=accepted,
+        )
+        if commit:
+            await answer.save()
+        return answer
+
+    def search_answers(self) -> AsyncSearch:
+        # search only our index
+        s = Answer.search()
+        # filter for answers belonging to us
+        s = s.filter("parent_id", type="answer", id=self.meta.id)
+        # add routing to only go to specific shard
+        s = s.params(routing=self.meta.id)
+        return s
+
+    async def get_answers(self) -> List[Any]:
+        """
+        Get answers either from inner_hits already present or by searching
+        elasticsearch.
+        """
+        if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
+            return cast(List[Any], self.meta.inner_hits["answer"].hits)
+        return [a async for a in self.search_answers()]
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.question_answer = "question"
+        await super().save(**kwargs)
+
+
+class Answer(Post):
+    is_accepted: bool = mapped_field(default=False)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Answer class for child documents with child name 'answer'"""
+        return (
+            isinstance(hit["_source"]["question_answer"], dict)
+            and hit["_source"]["question_answer"].get("name") == "answer"
+        )
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> AsyncSearch:  # type: ignore[override]
+        return cls._index.search(**kwargs).exclude("term", question_answer="question")
+
+    async def get_question(self) -> Optional[Question]:
+        # cache question in self.meta
+        # any attributes set on self would be interpreted as fields
+        if "question" not in self.meta:
+            self.meta.question = await Question.get(
+                id=self.question_answer.parent, index=self.meta.index
+            )
+        return cast(Optional[Question], self.meta.question)
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # set routing to parents id automatically
+        self.meta.routing = self.question_answer.parent
+        await super().save(**kwargs)
+
+
+async def setup() -> None:
+    """Create an IndexTemplate and save it into elasticsearch."""
+    index_template = Post._index.as_composable_template("base", priority=100)
+    await index_template.save()
+
+
+async def main() -> Answer:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create index
+    await setup()
+
+    # user objects to use
+    nick = User(
+        id=47,
+        signed_up=datetime(2017, 4, 3),
+        username="fxdgear",
+        email="nick.lang@elastic.co",
+        location="Colorado",
+    )
+    honza = User(
+        id=42,
+        signed_up=datetime(2013, 4, 3),
+        username="honzakral",
+        email="honza@elastic.co",
+        location="Prague",
+    )
+
+    # create a question object
+    question = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+    )
+    await question.save()
+    answer = await question.add_answer(honza, "Just use `elasticsearch-py`!")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+    return answer
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/percolate.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/percolate.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,117 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import asyncio
+import os
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    Keyword,
+    Percolator,
+    Q,
+    Query,
+    async_connections,
+    mapped_field,
+)
+
+
+class BlogPost(AsyncDocument):
+    """
+    Blog posts that will be automatically tagged based on percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    content: Optional[str]
+    tags: List[str] = mapped_field(Keyword(), default_factory=list)
+
+    class Index:
+        name = "test-blogpost"
+
+    async def add_tags(self) -> None:
+        # run a percolation to automatically tag the blog post.
+        s = AsyncSearch(index="test-percolator")
+        s = s.query(
+            "percolate", field="query", index=self._get_index(), document=self.to_dict()
+        )
+
+        # collect all the tags from matched percolators
+        async for percolator in s:
+            self.tags.extend(percolator.tags)
+
+        # make sure tags are unique
+        self.tags = list(set(self.tags))
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        await self.add_tags()
+        await super().save(**kwargs)
+
+
+class PercolatorDoc(AsyncDocument):
+    """
+    Document class used for storing the percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        _id: str
+
+    # relevant fields from BlogPost must also be present here for the queries
+    # to be able to use them. Another option would be to use document
+    # inheritance, but then save() would have to be reset to its normal behavior.
+    content: Optional[str]
+
+    # the percolator query to be run against the doc
+    query: Query = mapped_field(Percolator())
+    # list of tags to append to a document
+    tags: List[str] = mapped_field(Keyword(multi=True))
+
+    class Index:
+        name = "test-percolator"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    # create the percolator index if it doesn't exist
+    if not await PercolatorDoc._index.exists():
+        await PercolatorDoc.init()
+
+    # register a percolation query looking for documents about python
+    await PercolatorDoc(
+        _id="python",
+        tags=["programming", "development", "python"],
+        content="",
+        query=Q("match", content="python"),
+    ).save(refresh=True)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    await setup()
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/search_as_you_type.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/search_as_you_type.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/search_as_you_type.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/search_as_you_type.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,99 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with search_as_you_type field datatype and how to search it.
+
+When creating a field with the search_as_you_type datatype, Elasticsearch creates
+additional subfields to enable efficient as-you-type completion, matching terms at
+any position within the input.
+
+A custom analyzer with ASCII folding can be added to allow the search to work in
+different languages.
+"""
+
+import asyncio
+import os
+from typing import TYPE_CHECKING, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    SearchAsYouType,
+    async_connections,
+    mapped_field,
+)
+from elasticsearch.dsl.query import MultiMatch
+
+
+class Person(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="")
+
+    class Index:
+        name = "test-search-as-you-type"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await Person.init()
+
+    import pprint
+
+    pprint.pprint(Person().to_dict(), indent=2)
+
+    # index some sample data
+    names = [
+        "Andy Warhol",
+        "Alphonse Mucha",
+        "Henri de Toulouse-Lautrec",
+        "Jára Cimrman",
+    ]
+    for id, name in enumerate(names):
+        await Person(_id=id, name=name).save()
+
+    # refresh index manually to make changes live
+    await Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"):
+        s = Person.search()
+
+        s.query = MultiMatch(  # type: ignore[assignment]
+            query=text,
+            type="bool_prefix",
+            fields=["name", "name._2gram", "name._3gram"],
+        )
+
+        response = await s.execute()
+
+        # print out all the options we got
+        for h in response:
+            print("%15s: %25s" % (text, h.name))
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/semantic_text.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/semantic_text.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/semantic_text.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/semantic_text.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,148 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+"""
+# Semantic Text example
+
+Requirements:
+
+$ pip install "elasticsearch[async]" tqdm
+
+Before running this example, an ELSER inference endpoint must be created in the
+Elasticsearch cluster. This can be done manually from Kibana, or with the
+following curl command from a terminal:
+
+curl -X PUT \
+  "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \
+  -H "Content-Type: application/json" \
+  -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}'
+
+To run the example:
+
+$ python semantic_text.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to the command to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python semantic_text.py "work from home"
+$ python semantic_text.py "vacation time"
+$ python semantic_text.py "can I bring a bird to work?"
+
+When the index is created, the inference service will split the documents into
+short passages, and for each passage a sparse embedding will be generated using
+Elastic's ELSER v2 model.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, Optional
+from urllib.request import urlopen
+
+from tqdm import tqdm
+
+from elasticsearch import dsl
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+
+class WorkplaceDoc(dsl.AsyncDocument):
+    class Index:
+        name = "workplace_documents_semantic"
+
+    name: str
+    summary: str
+    content: Any = dsl.mapped_field(
+        dsl.field.SemanticText(inference_id="my-elser-endpoint")
+    )
+    created: datetime
+    updated: Optional[datetime]
+    url: str = dsl.mapped_field(dsl.Keyword())
+    category: str = dsl.mapped_field(dsl.Keyword())
+
+
+async def create() -> None:
+
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+    # refresh the index
+    await WorkplaceDoc._index.refresh()
+
+
+async def search(query: str) -> dsl.AsyncSearch[WorkplaceDoc]:
+    search = WorkplaceDoc.search()
+    search = search[:5]
+    return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query))
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    dsl.async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Content: {hit.content.text}")
+        print("--------------------\n")
+
+    # close the connection
+    await dsl.async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/sparse_vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/sparse_vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/sparse_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/sparse_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,198 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Sparse vector database example
+
+Requirements:
+
+$ pip install nltk tqdm "elasticsearch[async]"
+
+Before running this example, the ELSER v2 model must be downloaded and deployed
+to the Elasticsearch cluster, and an ingest pipeline must be defined. This can
+be done manually from Kibana, or with the following three curl commands from a
+terminal, adjusting the endpoint as needed:
+
+curl -X PUT \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \
+  -H "Content-Type: application/json" \
+  -d '{"input":{"field_names":["text_field"]}}'
+curl -X POST \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated"
+curl -X PUT \
+  "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \
+  -H "Content-Type: application/json" \
+  -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}'
+
+To run the example:
+
+$ python sparse_vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python sparse_vectors.py "work from home"
+$ python sparse_vectors.py "vacation time"
+$ python sparse_vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage a sparse embedding is generated using Elastic's ELSER v2 model.
+The documents that are returned as search results are those that have the
+highest scored passages. Add `--show-inner-hits` to the command to see
+individual passage results as well.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+from urllib.request import urlopen
+
+import nltk
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    InnerDoc,
+    Keyword,
+    Q,
+    SparseVector,
+    async_connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+
+class Passage(InnerDoc):
+    content: Optional[str]
+    embedding: Dict[str, float] = mapped_field(SparseVector(), init=False)
+
+
+class WorkplaceDoc(AsyncDocument):
+    class Index:
+        name = "workplace_documents_sparse"
+        settings = {"default_pipeline": "elser_ingest_pipeline"}
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword())
+    category: str = mapped_field(Keyword())
+    passages: List[Passage] = mapped_field(default=[])
+
+    _model: Any = None
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = nltk.sent_tokenize(self.content)
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(Passage(content=passage))
+
+
+async def create() -> None:
+
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+
+async def search(query: str) -> AsyncSearch[WorkplaceDoc]:
+    return WorkplaceDoc.search()[:5].query(
+        "nested",
+        path="passages",
+        query=Q(
+            "text_expansion",
+            passages__content={
+                "model_id": ".elser_model_2",
+                "model_text": query,
+            },
+        ),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples/vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,187 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Vector database example
+
+Requirements:
+
+$ pip install nltk sentence_transformers tqdm "elasticsearch[async]"
+
+To run the example:
+
+$ python vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python vectors.py "work from home"
+$ python vectors.py "vacation time"
+$ python vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage an embedding is generated using the open source
+"all-MiniLM-L6-v2" model. The documents that are returned as search results are
+those that have the highest scored passages. Add `--show-inner-hits` to the
+command to see individual passage results as well.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, List, Optional, cast
+from urllib.request import urlopen
+
+import nltk
+from sentence_transformers import SentenceTransformer
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    DenseVector,
+    InnerDoc,
+    Keyword,
+    M,
+    async_connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+MODEL_NAME = "all-MiniLM-L6-v2"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+# this will be the embedding model
+embedding_model: Any = None
+
+
+class Passage(InnerDoc):
+    content: str
+    embedding: List[float] = mapped_field(DenseVector())
+
+
+class WorkplaceDoc(AsyncDocument):
+    class Index:
+        name = "workplace_documents"
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword(required=True))
+    category: str = mapped_field(Keyword(required=True))
+    passages: M[List[Passage]] = mapped_field(default=[])
+
+    @classmethod
+    def get_embedding(cls, input: str) -> List[float]:
+        global embedding_model
+        if embedding_model is None:
+            embedding_model = SentenceTransformer(MODEL_NAME)
+        return cast(List[float], list(embedding_model.encode(input)))
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = cast(List[str], nltk.sent_tokenize(self.content))
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(
+                Passage(content=passage, embedding=self.get_embedding(passage))
+            )
+
+
+async def create() -> None:
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+
+async def search(query: str) -> AsyncSearch[WorkplaceDoc]:
+    return WorkplaceDoc.search().knn(
+        field=WorkplaceDoc.passages.embedding,
+        k=5,
+        num_candidates=50,
+        query_vector=list(WorkplaceDoc.get_embedding(query)),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/README.rst 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/README.rst
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/README.rst	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/README.rst	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,47 @@
+Elasticsearch DSL Examples
+==========================
+
+In this directory you can see several complete examples demonstrating key
+concepts and patterns exposed by ``elasticsearch-dsl``.
+
+``alias_migration.py``
+----------------------
+
+The alias migration example shows a useful pattern where we use versioned
+indices (``test-blog-0``, ``test-blog-1``, ...) to manage schema changes and
+hide them behind an alias, so that the application doesn't have to be aware of
+the versions and can just refer to the ``test-blog`` alias for both read and
+write operations.
+
+For simplicity we use a timestamp as version in the index name.
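+
+The heart of the migration is a single atomic alias update, condensed here from
+``alias_migration.py`` (``es``, ``ALIAS``, ``PATTERN`` and ``next_index`` are as
+defined in that example)::
+
+    es.indices.update_aliases(
+        body={
+            "actions": [
+                {"remove": {"alias": ALIAS, "index": PATTERN}},
+                {"add": {"alias": ALIAS, "index": next_index}},
+            ]
+        }
+    )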
+
+``parent_child.py``
+-------------------
+
+A more complex example highlighting the relationships available in
+elasticsearch - `parent/child
+<https://www.elastic.co/guide/en/elasticsearch/reference/6.3/parent-join.html>`_ and
+`nested
+<https://www.elastic.co/guide/en/elasticsearch/reference/6.3/nested.html>`_.
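+
+The parent/child relationship is declared with a single ``Join`` field on the
+shared ``Post`` base class, as in the example::
+
+    question_answer: Any = mapped_field(
+        Join(relations={"question": "answer"}), default_factory=dict
+    )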
+
+``composite_agg.py``
+--------------------
+
+A helper function using the `composite aggregation
+<https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-composite-aggregation.html>`_
+to paginate over aggregation results.
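+
+Typical usage of the ``scan_aggs`` helper, condensed from the async variant of
+the example (the synchronous one is analogous)::
+
+    async for b in scan_aggs(
+        AsyncSearch(index="git"),
+        [{"files": aggs.Terms(field="files")}],
+        {"first_seen": aggs.Min(field="committed_date")},
+    ):
+        ...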
+
+``percolate.py``
+----------------
+
+A ``BlogPost`` document with automatic classification using the `percolator
+<https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-percolate-query.html>`_
+functionality.
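+
+Tagging comes down to running a ``percolate`` query with the new blog post as
+the document, as in the example's ``add_tags`` method (async variant shown)::
+
+    s = AsyncSearch(index="test-percolator")
+    s = s.query(
+        "percolate", field="query", index=self._get_index(), document=self.to_dict()
+    )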
+
+``completion.py``
+-----------------
+
+An example using the `completion suggester
+<https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters-completion.html>`_
+to auto-complete people's names.
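+
+Running a suggestion against the ``suggest`` field then looks like this,
+condensed from the async variant of the example::
+
+    s = Person.search()
+    s = s.suggest("auto_complete", text, completion={"field": "suggest"})
+    response = await s.execute()
+    for option in response.suggest["auto_complete"][0].options:
+        print(option._source.name, option._score)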
+
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/alias_migration.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/alias_migration.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,161 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Simple example with a single Document demonstrating how schema can be managed,
+including upgrading with reindexing.
+
+Key concepts:
+
+    * setup() function to first initialize the schema (as index template) in
+      elasticsearch. Can be called any time (recommended with every deploy of
+      your app).
+
+    * migrate() function to be called whenever the schema changes - it
+      will create a new index (by incrementing the version) and update the alias.
+      By default it will also (before flipping the alias) move the data from the
+      previous index to the new one.
+
+    * BlogPost._matches() class method is required for this code to work since
+      otherwise BlogPost will not be used to deserialize the documents as those
+      will have index set to the concrete index whereas the class refers to the
+      alias.
+"""
+import os
+from datetime import datetime
+from fnmatch import fnmatch
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from elasticsearch.dsl import Document, Keyword, connections, mapped_field
+
+ALIAS = "test-blog"
+PATTERN = ALIAS + "-*"
+PRIORITY = 100
+
+
+class BlogPost(Document):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    title: str
+    tags: List[str] = mapped_field(Keyword())
+    content: str
+    published: Optional[datetime] = mapped_field(default=None)
+
+    def is_published(self) -> bool:
+        return bool(self.published and datetime.now() > self.published)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # override _matches to match indices in a pattern instead of just ALIAS
+        # hit is the raw dict as returned by elasticsearch
+        return fnmatch(hit["_index"], PATTERN)
+
+    class Index:
+        # we will use an alias instead of the index
+        name = ALIAS
+        # set settings and possibly other attributes of the index like
+        # analyzers
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def setup() -> None:
+    """
+    Create the index template in elasticsearch specifying the mappings and any
+    settings to be used. This can be run at any time, ideally at every new code
+    deploy.
+    """
+    # create an index template
+    index_template = BlogPost._index.as_composable_template(
+        ALIAS, PATTERN, priority=PRIORITY
+    )
+    # upload the template into elasticsearch
+    # potentially overriding the one already there
+    index_template.save()
+
+    # create the first index if it doesn't exist
+    if not BlogPost._index.exists():
+        migrate(move_data=False)
+
+
+def migrate(move_data: bool = True, update_alias: bool = True) -> None:
+    """
+    Upgrade function that creates a new index for the data. Optionally it also can
+    (and by default will) reindex previous copy of the data into the new index
+    (specify ``move_data=False`` to skip this step) and update the alias to
+    point to the latest index (set ``update_alias=False`` to skip).
+
+    Note that while this function is running the application can still perform
+    any and all searches without any loss of functionality. It should, however,
+    not perform any writes at this time as those might be lost.
+    """
+    # construct a new index name by appending current timestamp
+    next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f"))
+
+    # get the low level connection
+    es = connections.get_connection()
+
+    # create new index, it will use the settings from the template
+    es.indices.create(index=next_index)
+
+    if move_data:
+        # move data from current alias to the new index
+        es.options(request_timeout=3600).reindex(
+            body={"source": {"index": ALIAS}, "dest": {"index": next_index}}
+        )
+        # refresh the index to make the changes visible
+        es.indices.refresh(index=next_index)
+
+    if update_alias:
+        # repoint the alias to point to the newly created index
+        es.indices.update_aliases(
+            body={
+                "actions": [
+                    {"remove": {"alias": ALIAS, "index": PATTERN}},
+                    {"add": {"alias": ALIAS, "index": next_index}},
+                ]
+            }
+        )
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    setup()
+
+    # create a new document
+    bp = BlogPost(
+        _id=0,
+        title="Hello World!",
+        tags=["testing", "dummy"],
+        content=open(__file__).read(),
+    )
+    bp.save(refresh=True)
+
+    # create new index
+    migrate()
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/alias_migration.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/alias_migration.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/alias_migration.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/alias_migration.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,162 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Simple example with a single Document demonstrating how schema can be managed,
+including upgrading with reindexing.
+
+Key concepts:
+
+    * setup() function to first initialize the schema (as index template) in
+      elasticsearch. Can be called any time (recommended with every deploy of
+      your app).
+
+    * migrate() function to be called whenever the schema changes - it
+      will create a new index (by incrementing the version) and update the alias.
+      By default it will also (before flipping the alias) move the data from the
+      previous index to the new one.
+
+    * BlogPost._matches() class method is required for this code to work since
+      otherwise BlogPost will not be used to deserialize the documents as those
+      will have index set to the concrete index whereas the class refers to the
+      alias.
+"""
+import asyncio
+import os
+from datetime import datetime
+from fnmatch import fnmatch
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from elasticsearch.dsl import AsyncDocument, Keyword, async_connections, mapped_field
+
+ALIAS = "test-blog"
+PATTERN = ALIAS + "-*"
+PRIORITY = 100
+
+
+class BlogPost(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    title: str
+    tags: List[str] = mapped_field(Keyword())
+    content: str
+    published: Optional[datetime] = mapped_field(default=None)
+
+    def is_published(self) -> bool:
+        return bool(self.published and datetime.now() > self.published)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # override _matches to match indices in a pattern instead of just ALIAS
+        # hit is the raw dict as returned by elasticsearch
+        return fnmatch(hit["_index"], PATTERN)
+
+    class Index:
+        # we will use an alias instead of the index
+        name = ALIAS
+        # set settings and possibly other attributes of the index like
+        # analyzers
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    """
+    Create the index template in elasticsearch specifying the mappings and any
+    settings to be used. This can be run at any time, ideally at every new code
+    deploy.
+    """
+    # create an index template
+    index_template = BlogPost._index.as_composable_template(
+        ALIAS, PATTERN, priority=PRIORITY
+    )
+    # upload the template into elasticsearch
+    # potentially overriding the one already there
+    await index_template.save()
+
+    # create the first index if it doesn't exist
+    if not await BlogPost._index.exists():
+        await migrate(move_data=False)
+
+
+async def migrate(move_data: bool = True, update_alias: bool = True) -> None:
+    """
+    Upgrade function that creates a new index for the data. Optionally it also can
+    (and by default will) reindex previous copy of the data into the new index
+    (specify ``move_data=False`` to skip this step) and update the alias to
+    point to the latest index (set ``update_alias=False`` to skip).
+
+    Note that while this function is running the application can still perform
+    any and all searches without any loss of functionality. It should, however,
+    not perform any writes at this time as those might be lost.
+    """
+    # construct a new index name by appending current timestamp
+    next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f"))
+
+    # get the low level connection
+    es = async_connections.get_connection()
+
+    # create new index, it will use the settings from the template
+    await es.indices.create(index=next_index)
+
+    if move_data:
+        # move data from current alias to the new index
+        await es.options(request_timeout=3600).reindex(
+            body={"source": {"index": ALIAS}, "dest": {"index": next_index}}
+        )
+        # refresh the index to make the changes visible
+        await es.indices.refresh(index=next_index)
+
+    if update_alias:
+        # repoint the alias to point to the newly created index
+        await es.indices.update_aliases(
+            body={
+                "actions": [
+                    {"remove": {"alias": ALIAS, "index": PATTERN}},
+                    {"add": {"alias": ALIAS, "index": next_index}},
+                ]
+            }
+        )
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await setup()
+
+    # create a new document
+    bp = BlogPost(
+        _id=0,
+        title="Hello World!",
+        tags=["testing", "dummy"],
+        content=open(__file__).read(),
+    )
+    await bp.save(refresh=True)
+
+    # create new index
+    await migrate()
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/completion.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/completion.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,114 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with completion suggester.
+
+In the ``Person`` class we index the person's name to allow auto completing in
+any order ("first last", "middle last first", ...). For the weight we use a
+value from the ``popularity`` field which is a long.
+
+To make the suggestions work in different languages we added a custom analyzer
+that does ascii folding.
+"""
+
+import asyncio
+import os
+from itertools import permutations
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    Completion,
+    Keyword,
+    Long,
+    Text,
+    analyzer,
+    async_connections,
+    mapped_field,
+    token_filter,
+)
+
+# custom analyzer for names
+ascii_fold = analyzer(
+    "ascii_fold",
+    # we don't want to split O'Brian or Toulouse-Lautrec
+    tokenizer="whitespace",
+    filter=["lowercase", token_filter("ascii_fold", "asciifolding")],
+)
+
+
+class Person(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+    popularity: int = mapped_field(Long(), default=0)
+
+    # completion field with a custom analyzer
+    suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False)
+
+    def clean(self) -> None:
+        """
+        Automatically construct the suggestion input and weight by taking all
+        possible permutations of Person's name as ``input`` and taking their
+        popularity as ``weight``.
+        """
+        self.suggest = {
+            "input": [" ".join(p) for p in permutations(self.name.split())],
+            "weight": self.popularity,
+        }
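+        # Hedged illustration (not part of the original example): for
+        # Person(name="Jára Cimrman", popularity=124) the assignment above
+        # produces something like
+        # {"input": ["Jára Cimrman", "Cimrman Jára"], "weight": 124}.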
+
+    class Index:
+        name = "test-suggest"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await Person.init()
+
+    # index some sample data
+    for id, (name, popularity) in enumerate(
+        [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)]
+    ):
+        await Person(_id=id, name=name, popularity=popularity).save()
+
+    # refresh index manually to make changes live
+    await Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Jara Cimr", "tou", "de hen"):
+        s = Person.search()
+        s = s.suggest("auto_complete", text, completion={"field": "suggest"})
+        response = await s.execute()
+
+        # print out all the options we got
+        for option in response.suggest["auto_complete"][0].options:
+            print("%10s: %25s (%d)" % (text, option._source.name, option._score))
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/composite_agg.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/composite_agg.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/composite_agg.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/composite_agg.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,93 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import asyncio
+import os
+from typing import Any, AsyncIterator, Dict, Mapping, Sequence, cast
+
+from elasticsearch.dsl import Agg, AsyncSearch, Response, aggs, async_connections
+from elasticsearch.dsl.types import CompositeAggregate
+from elasticsearch.helpers import async_bulk
+from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX
+
+
+async def scan_aggs(
+    search: AsyncSearch,
+    source_aggs: Sequence[Mapping[str, Agg]],
+    inner_aggs: Dict[str, Agg] = {},
+    size: int = 10,
+) -> AsyncIterator[CompositeAggregate]:
+    """
+    Helper function used to iterate over all possible bucket combinations of
+    ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
+    ``composite`` aggregation under the hood to perform this.
+    """
+
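+    # Hedged sketch of the body each page of this helper sends (illustration
+    # only; the exact serialization is produced by the DSL classes below):
+    #   {"size": 0,
+    #    "aggs": {"comp": {"composite": {"sources": [...], "size": size,
+    #                                    "after": <key of the previous page>},
+    #                      "aggs": {<inner_aggs>}}}}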
+    async def run_search(**kwargs: Any) -> Response:
+        s = search[:0]
+        bucket = s.aggs.bucket(
+            "comp",
+            aggs.Composite(
+                sources=source_aggs,
+                size=size,
+                **kwargs,
+            ),
+        )
+        for agg_name, agg in inner_aggs.items():
+            bucket[agg_name] = agg
+        return await s.execute()
+
+    response = await run_search()
+    while response.aggregations["comp"].buckets:
+        for b in response.aggregations["comp"].buckets:
+            yield cast(CompositeAggregate, b)
+        if "after_key" in response.aggregations["comp"]:
+            after = response.aggregations["comp"].after_key
+        else:
+            after = response.aggregations["comp"].buckets[-1].key
+        response = await run_search(after=after)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    client = async_connections.create_connection(
+        hosts=[os.environ["ELASTICSEARCH_URL"]]
+    )
+
+    # create the index and populate it with some data
+    # note that the dataset is imported from the library's test suite
+    await client.indices.delete(index="git", ignore_unavailable=True)
+    await client.indices.create(index="git", **GIT_INDEX)
+    await async_bulk(client, DATA, raise_on_error=True, refresh=True)
+
+    # run some aggregations on the data
+    async for b in scan_aggs(
+        AsyncSearch(index="git"),
+        [{"files": aggs.Terms(field="files")}],
+        {"first_seen": aggs.Min(field="committed_date")},
+    ):
+        print(
+            "File %s has been modified %d times, first seen at %s."
+            % (b.key.files, b.doc_count, b.first_seen.value_as_string)
+        )
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/esql_employees.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/esql_employees.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/esql_employees.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/esql_employees.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,170 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# ES|QL query builder example
+
+Requirements:
+
+$ pip install "elasticsearch[async]" faker
+
+To run the example:
+
+$ python esql_employees.py "name to search"
+
+The index will be created automatically with a list of 1000 randomly generated
+employees if it does not exist. Add `--recreate-index` or `-r` to the command
+to regenerate it.
+
+Examples:
+
+$ python esql_employees "Mark"  # employees named Mark (first or last names)
+$ python esql_employees "Sarah" --limit 10  # up to 10 employees named Sarah
+$ python esql_employees "Sam" --sort height  # sort results by height
+$ python esql_employees "Sam" --sort name  # sort results by last name
+"""
+
+import argparse
+import asyncio
+import os
+import random
+
+from faker import Faker
+
+from elasticsearch.dsl import AsyncDocument, InnerDoc, M, async_connections
+from elasticsearch.esql import ESQLBase
+from elasticsearch.esql.functions import concat, multi_match
+
+fake = Faker()
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+    zip_code: M[str]
+
+
+class Employee(AsyncDocument):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+    @property
+    def name(self) -> str:
+        return f"{self.first_name} {self.last_name}"
+
+    def __repr__(self) -> str:
+        return f"<Employee[{self.meta.id}]: {self.first_name} {self.last_name}>"
+
+
+async def create(num_employees: int = 1000) -> None:
+    print("Creating a new employee index...")
+    if await Employee._index.exists():
+        await Employee._index.delete()
+    await Employee.init()
+
+    for i in range(num_employees):
+        address = Address(
+            address=fake.address(), city=fake.city(), zip_code=fake.zipcode()
+        )
+        emp = Employee(
+            emp_no=10000 + i,
+            first_name=fake.first_name(),
+            last_name=fake.last_name(),
+            height=int((random.random() * 0.8 + 1.5) * 1000) / 1000,
+            still_hired=random.random() >= 0.5,
+            address=address,
+        )
+        await emp.save()
+    await Employee._index.refresh()
+
+
+async def search(query: str, limit: int, sort: str) -> None:
+    q: ESQLBase = (
+        Employee.esql_from()
+        .where(multi_match(query, Employee.first_name, Employee.last_name))
+        .eval(full_name=concat(Employee.first_name, " ", Employee.last_name))
+    )
+    if sort == "height":
+        q = q.sort(Employee.height.desc())
+    elif sort == "name":
+        q = q.sort(Employee.last_name.asc())
+    q = q.limit(limit)
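+    # Hedged sketch of the ES|QL statement this roughly corresponds to
+    # (assuming --sort height and the default limit of 100):
+    #   FROM employees
+    #   | WHERE MULTI_MATCH("<query>", first_name, last_name)
+    #   | EVAL full_name = CONCAT(first_name, " ", last_name)
+    #   | SORT height DESC
+    #   | LIMIT 100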
+    async for result in Employee.esql_execute(q, return_additional=True):
+        assert type(result) == tuple
+        employee = result[0]
+        full_name = result[1]["full_name"]
+        print(
+            f"{full_name:<20}",
+            f"{'Hired' if employee.still_hired else 'Not hired':<10}",
+            f"{employee.height:5.2f}m",
+            f"{employee.address.city:<20}",
+        )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Employee ES|QL example")
+    parser.add_argument(
+        "--recreate-index",
+        "-r",
+        action="store_true",
+        help="Recreate and populate the index",
+    )
+    parser.add_argument(
+        "--limit",
+        action="store",
+        type=int,
+        default=100,
+        help="Maximum number or employees to return (default: 100)",
+    )
+    parser.add_argument(
+        "--sort",
+        action="store",
+        type=str,
+        default=None,
+        help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)',
+    )
+    parser.add_argument(
+        "query", action="store", help="The name or partial name to search for"
+    )
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await Employee._index.exists():
+        await create()
+    await Employee.init()
+
+    await search(args.query, args.limit, args.sort)
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/parent_child.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/parent_child.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,276 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Complex data model example modeling stackoverflow-like data.
+
+It is used to showcase several key features of elasticsearch-dsl:
+
+    * Object and Nested fields: see the User and Comment classes and the fields
+      in which they are used
+
+        * method add_comment is used to add comments
+
+    * Parent/Child relationship
+
+        * See the Join field on Post creating the relationship between Question
+          and Answer
+
+        * Meta.matches allows the hits from the same index to be wrapped in the
+          proper classes
+
+        * to see how child objects are created see Question.add_answer
+
+        * Question.search_answers shows how to query for children of a
+          particular parent
+
+"""
+import asyncio
+import os
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    Date,
+    InnerDoc,
+    Join,
+    Keyword,
+    Long,
+    Text,
+    async_connections,
+    mapped_field,
+)
+
+
+class User(InnerDoc):
+    """
+    Class used to represent a denormalized user stored on other objects.
+    """
+
+    id: int = mapped_field(Long())
+    signed_up: Optional[datetime] = mapped_field(Date())
+    username: str = mapped_field(Text(fields={"keyword": Keyword()}))
+    email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+    location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+
+
+class Comment(InnerDoc):
+    """
+    Class wrapper for nested comment objects.
+    """
+
+    author: User
+    created: datetime
+    content: str
+
+
+class Post(AsyncDocument):
+    """
+    Base class for Question and Answer containing the common fields.
+    """
+
+    author: User
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _routing: str = mapped_field(default=None)
+        _id: Optional[int] = mapped_field(default=None)
+
+    created: Optional[datetime] = mapped_field(default=None)
+    body: str = mapped_field(default="")
+    comments: List[Comment] = mapped_field(default_factory=list)
+    question_answer: Any = mapped_field(
+        Join(relations={"question": "answer"}), default_factory=dict
+    )
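+    # Hedged note (illustration only): parent documents store the plain value
+    # "question" in this join field, while child documents store
+    # {"name": "answer", "parent": <parent id>}, as set in Question.save() and
+    # Question.add_answer() below.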
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # Post is an abstract class, make sure it never gets used for
+        # deserialization
+        return False
+
+    class Index:
+        name = "test-qa-site"
+        settings = {
+            "number_of_shards": 1,
+            "number_of_replicas": 0,
+        }
+
+    async def add_comment(
+        self,
+        user: User,
+        content: str,
+        created: Optional[datetime] = None,
+        commit: Optional[bool] = True,
+    ) -> Comment:
+        c = Comment(author=user, content=content, created=created or datetime.now())
+        self.comments.append(c)
+        if commit:
+            await self.save()
+        return c
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # if there is no date, use now
+        if self.created is None:
+            self.created = datetime.now()
+        await super().save(**kwargs)
+
+
+class Question(Post):
+    tags: List[str] = mapped_field(
+        default_factory=list
+    )  # .tags will return empty list if not present
+    title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Question class for parent documents"""
+        return bool(hit["_source"]["question_answer"] == "question")
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> AsyncSearch:  # type: ignore[override]
+        return cls._index.search(**kwargs).filter("term", question_answer="question")
+
+    async def add_answer(
+        self,
+        user: User,
+        body: str,
+        created: Optional[datetime] = None,
+        accepted: bool = False,
+        commit: Optional[bool] = True,
+    ) -> "Answer":
+        answer = Answer(
+            # required make sure the answer is stored in the same shard
+            _routing=self.meta.id,
+            # set up the parent/child mapping
+            question_answer={"name": "answer", "parent": self.meta.id},
+            # pass in the field values
+            author=user,
+            created=created,
+            body=body,
+            is_accepted=accepted,
+        )
+        if commit:
+            await answer.save()
+        return answer
+
+    def search_answers(self) -> AsyncSearch:
+        # search only our index
+        s = Answer.search()
+        # filter for answers belonging to us
+        s = s.filter("parent_id", type="answer", id=self.meta.id)
+        # add routing to only go to specific shard
+        s = s.params(routing=self.meta.id)
+        return s
+
+    async def get_answers(self) -> List[Any]:
+        """
+        Get answers either from inner_hits already present or by searching
+        elasticsearch.
+        """
+        if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
+            return cast(List[Any], self.meta.inner_hits["answer"].hits)
+        return [a async for a in self.search_answers()]
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.question_answer = "question"
+        await super().save(**kwargs)
+
+
+class Answer(Post):
+    is_accepted: bool = mapped_field(default=False)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Answer class for child documents with child name 'answer'"""
+        return (
+            isinstance(hit["_source"]["question_answer"], dict)
+            and hit["_source"]["question_answer"].get("name") == "answer"
+        )
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> AsyncSearch:  # type: ignore[override]
+        return cls._index.search(**kwargs).exclude("term", question_answer="question")
+
+    async def get_question(self) -> Optional[Question]:
+        # cache question in self.meta
+        # any attributes set on self would be interpreted as fields
+        if "question" not in self.meta:
+            self.meta.question = await Question.get(
+                id=self.question_answer.parent, index=self.meta.index
+            )
+        return cast(Optional[Question], self.meta.question)
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # set routing to the parent's id automatically
+        self.meta.routing = self.question_answer.parent
+        await super().save(**kwargs)
+
+
+async def setup() -> None:
+    """Create an IndexTemplate and save it into elasticsearch."""
+    index_template = Post._index.as_composable_template("base", priority=100)
+    await index_template.save()
+
+
+async def main() -> Answer:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create index
+    await setup()
+
+    # user objects to use
+    nick = User(
+        id=47,
+        signed_up=datetime(2017, 4, 3),
+        username="fxdgear",
+        email="nick.lang@elastic.co",
+        location="Colorado",
+    )
+    honza = User(
+        id=42,
+        signed_up=datetime(2013, 4, 3),
+        username="honzakral",
+        email="honza@elastic.co",
+        location="Prague",
+    )
+
+    # create a question object
+    question = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+    )
+    await question.save()
+    answer = await question.add_answer(honza, "Just use `elasticsearch-py`!")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+    return answer
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/percolate.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/percolate.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,117 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import asyncio
+import os
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    Keyword,
+    Percolator,
+    Q,
+    Query,
+    async_connections,
+    mapped_field,
+)
+
+
+class BlogPost(AsyncDocument):
+    """
+    Blog posts that will be automatically tagged based on percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    content: Optional[str]
+    tags: List[str] = mapped_field(Keyword(), default_factory=list)
+
+    class Index:
+        name = "test-blogpost"
+
+    async def add_tags(self) -> None:
+        # run a percolation to automatically tag the blog post.
+        s = AsyncSearch(index="test-percolator")
+        s = s.query(
+            "percolate", field="query", index=self._get_index(), document=self.to_dict()
+        )
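+        # Hedged sketch of the query this sends (illustration only):
+        #   {"percolate": {"field": "query", "index": "test-blogpost",
+        #                  "document": {<this blog post as a dict>}}}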
+
+        # collect all the tags from matched percolators
+        async for percolator in s:
+            self.tags.extend(percolator.tags)
+
+        # make sure tags are unique
+        self.tags = list(set(self.tags))
+
+    async def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        await self.add_tags()
+        await super().save(**kwargs)
+
+
+class PercolatorDoc(AsyncDocument):
+    """
+    Document class used for storing the percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        _id: str
+
+    # relevant fields from BlogPost must also be present here for the queries
+    # to be able to use them. Another option would be to use document
+    # inheritance, but save() would have to be reset to normal behavior.
+    content: Optional[str]
+
+    # the percolator query to be run against the doc
+    query: Query = mapped_field(Percolator())
+    # list of tags to append to a document
+    tags: List[str] = mapped_field(Keyword(multi=True))
+
+    class Index:
+        name = "test-percolator"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    # create the percolator index if it doesn't exist
+    if not await PercolatorDoc._index.exists():
+        await PercolatorDoc.init()
+
+    # register a percolation query looking for documents about python
+    await PercolatorDoc(
+        _id="python",
+        tags=["programming", "development", "python"],
+        content="",
+        query=Q("match", content="python"),
+    ).save(refresh=True)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    await setup()
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/search_as_you_type.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/search_as_you_type.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/search_as_you_type.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/search_as_you_type.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,99 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with search_as_you_type field datatype and how to search it.
+
+When creating a field with the search_as_you_type datatype, Elasticsearch creates
+additional subfields to enable efficient as-you-type completion, matching terms at
+any position within the input.
+
+A custom analyzer with ascii folding could additionally be used to make searches
+work in different languages.
+"""
+
+import asyncio
+import os
+from typing import TYPE_CHECKING, Optional
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    SearchAsYouType,
+    async_connections,
+    mapped_field,
+)
+from elasticsearch.dsl.query import MultiMatch
+
+
+class Person(AsyncDocument):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="")
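+    # Hedged note: with max_shingle_size=3, Elasticsearch also generates the
+    # name._2gram, name._3gram and name._index_prefix subfields; the
+    # bool_prefix multi_match query in main() below targets the first two.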
+
+    class Index:
+        name = "test-search-as-you-type"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    await Person.init()
+
+    import pprint
+
+    pprint.pprint(Person().to_dict(), indent=2)
+
+    # index some sample data
+    names = [
+        "Andy Warhol",
+        "Alphonse Mucha",
+        "Henri de Toulouse-Lautrec",
+        "Jára Cimrman",
+    ]
+    for id, name in enumerate(names):
+        await Person(_id=id, name=name).save()
+
+    # refresh index manually to make changes live
+    await Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"):
+        s = Person.search()
+
+        s.query = MultiMatch(  # type: ignore[assignment]
+            query=text,
+            type="bool_prefix",
+            fields=["name", "name._2gram", "name._3gram"],
+        )
+
+        response = await s.execute()
+
+        # print out all the options we got
+        for h in response:
+            print("%15s: %25s" % (text, h.name))
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/semantic_text.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/semantic_text.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/semantic_text.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/semantic_text.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,148 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+"""
+# Semantic Text example
+
+Requirements:
+
+$ pip install "elasticsearch[async]" tqdm
+
+Before running this example, an ELSER inference endpoint must be created in the
+Elasticsearch cluster. This can be done manually from Kibana, or with the
+following curl command from a terminal:
+
+curl -X PUT \
+  "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \
+  -H "Content-Type: application/json" \
+  -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}'
+
+To run the example:
+
+$ python semantic_text.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to the command to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python semantic_text.py "work from home"
+$ python semantic_text.py "vacation time"
+$ python semantic_text.py "can I bring a bird to work?"
+
+When the index is created, the inference service will split the documents into
+short passages, and for each passage a sparse embedding will be generated using
+Elastic's ELSER v2 model.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, Optional
+from urllib.request import urlopen
+
+from tqdm import tqdm
+
+from elasticsearch import dsl
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+
+class WorkplaceDoc(dsl.AsyncDocument):
+    class Index:
+        name = "workplace_documents_semantic"
+
+    name: str
+    summary: str
+    content: Any = dsl.mapped_field(
+        dsl.field.SemanticText(inference_id="my-elser-endpoint")
+    )
+    created: datetime
+    updated: Optional[datetime]
+    url: str = dsl.mapped_field(dsl.Keyword())
+    category: str = dsl.mapped_field(dsl.Keyword())
+
+
+async def create() -> None:
+
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+    # refresh the index
+    await WorkplaceDoc._index.refresh()
+
+
+async def search(query: str) -> dsl.AsyncSearch[WorkplaceDoc]:
+    search = WorkplaceDoc.search()
+    search = search[:5]
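+    # Hedged sketch of the query body this builds (illustration only):
+    #   {"query": {"semantic": {"field": "content", "query": "<text to search>"}},
+    #    "size": 5}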
+    return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query))
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    dsl.async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Content: {hit.content.text}")
+        print("--------------------\n")
+
+    # close the connection
+    await dsl.async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/sparse_vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/sparse_vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/sparse_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/sparse_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,198 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Sparse vector database example
+
+Requirements:
+
+$ pip install nltk tqdm "elasticsearch[async]"
+
+Before running this example, the ELSER v2 model must be downloaded and deployed
+to the Elasticsearch cluster, and an ingest pipeline must be defined. This can
+be done manually from Kibana, or with the following three curl commands from a
+terminal, adjusting the endpoint as needed:
+
+curl -X PUT \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \
+  -H "Content-Type: application/json" \
+  -d '{"input":{"field_names":["text_field"]}}'
+curl -X POST \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated"
+curl -X PUT \
+  "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \
+  -H "Content-Type: application/json" \
+  -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}'
+
+To run the example:
+
+$ python sparse_vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python sparse_vectors.py "work from home"
+$ python sparse_vectors.py "vacation time"
+$ python sparse_vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage a sparse embedding is generated using Elastic's ELSER v2 model.
+The documents that are returned as search results are those that have the
+highest scored passages. Add `--show-inner-hits` to the command to see
+individual passage results as well.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+from urllib.request import urlopen
+
+import nltk
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    InnerDoc,
+    Keyword,
+    Q,
+    SparseVector,
+    async_connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+
+class Passage(InnerDoc):
+    content: Optional[str]
+    embedding: Dict[str, float] = mapped_field(SparseVector(), init=False)
+
+
+class WorkplaceDoc(AsyncDocument):
+    class Index:
+        name = "workplace_documents_sparse"
+        settings = {"default_pipeline": "elser_ingest_pipeline"}
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword())
+    category: str = mapped_field(Keyword())
+    passages: List[Passage] = mapped_field(default=[])
+
+    _model: Any = None
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = nltk.sent_tokenize(self.content)
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(Passage(content=passage))
+
+
+async def create() -> None:
+
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+
+async def search(query: str) -> AsyncSearch[WorkplaceDoc]:
+    return WorkplaceDoc.search()[:5].query(
+        "nested",
+        path="passages",
+        query=Q(
+            "text_expansion",
+            passages__content={
+                "model_id": ".elser_model_2",
+                "model_text": query,
+            },
+        ),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/async/vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,187 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Vector database example
+
+Requirements:
+
+$ pip install nltk sentence_transformers tqdm "elasticsearch[async]"
+
+To run the example:
+
+$ python vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python vectors.py "work from home"
+$ python vectors.py "vacation time"
+$ python vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage an embedding is generated using the open source
+"all-MiniLM-L6-v2" model. The documents that are returned as search results are
+those that have the highest scored passages. Add `--show-inner-hits` to the
+command to see individual passage results as well.
+"""
+
+import argparse
+import asyncio
+import json
+import os
+from datetime import datetime
+from typing import Any, List, Optional, cast
+from urllib.request import urlopen
+
+import nltk
+from sentence_transformers import SentenceTransformer
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    AsyncDocument,
+    AsyncSearch,
+    DenseVector,
+    InnerDoc,
+    Keyword,
+    M,
+    async_connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+MODEL_NAME = "all-MiniLM-L6-v2"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+# this will be the embedding model
+embedding_model: Any = None
+
+
+class Passage(InnerDoc):
+    content: str
+    embedding: List[float] = mapped_field(DenseVector())
+
+
+class WorkplaceDoc(AsyncDocument):
+    class Index:
+        name = "workplace_documents"
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword(required=True))
+    category: str = mapped_field(Keyword(required=True))
+    passages: M[List[Passage]] = mapped_field(default=[])
+
+    @classmethod
+    def get_embedding(cls, input: str) -> List[float]:
+        global embedding_model
+        if embedding_model is None:
+            embedding_model = SentenceTransformer(MODEL_NAME)
+        return cast(List[float], list(embedding_model.encode(input)))
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = cast(List[str], nltk.sent_tokenize(self.content))
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(
+                Passage(content=passage, embedding=self.get_embedding(passage))
+            )
+
+
+async def create() -> None:
+    # create the index
+    await WorkplaceDoc._index.delete(ignore_unavailable=True)
+    await WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        await doc.save()
+
+
+async def search(query: str) -> AsyncSearch[WorkplaceDoc]:
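+    # Hedged sketch of the knn section this builds (illustration only):
+    #   {"knn": {"field": "passages.embedding", "k": 5, "num_candidates": 50,
+    #            "query_vector": [...], "inner_hits": {"size": 2}}}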
+    return WorkplaceDoc.search().knn(
+        field=WorkplaceDoc.passages.embedding,
+        k=5,
+        num_candidates=50,
+        query_vector=list(WorkplaceDoc.get_embedding(query)),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+async def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not await WorkplaceDoc._index.exists():
+        await create()
+
+    results = await search(args.query)
+
+    async for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/completion.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/completion.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/completion.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/completion.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,113 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with completion suggester.
+
+In the ``Person`` class we index the person's name to allow auto completing in
+any order ("first last", "middle last first", ...). For the weight we use a
+value from the ``popularity`` field which is a long.
+
+To make the suggestions work in different languages we added a custom analyzer
+that does ascii folding.
+"""
+
+import os
+from itertools import permutations
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from elasticsearch.dsl import (
+    Completion,
+    Document,
+    Keyword,
+    Long,
+    Text,
+    analyzer,
+    connections,
+    mapped_field,
+    token_filter,
+)
+
+# custom analyzer for names
+ascii_fold = analyzer(
+    "ascii_fold",
+    # we don't want to split O'Brian or Toulouse-Lautrec
+    tokenizer="whitespace",
+    filter=["lowercase", token_filter("ascii_fold", "asciifolding")],
+)
+
+
+class Person(Document):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+    popularity: int = mapped_field(Long(), default=0)
+
+    # completion field with a custom analyzer
+    suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False)
+
+    def clean(self) -> None:
+        """
+        Automatically construct the suggestion input and weight by taking all
+        possible permutations of Person's name as ``input`` and taking their
+        popularity as ``weight``.
+        """
+        self.suggest = {
+            "input": [" ".join(p) for p in permutations(self.name.split())],
+            "weight": self.popularity,
+        }
+
+    class Index:
+        name = "test-suggest"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    Person.init()
+
+    # index some sample data
+    for id, (name, popularity) in enumerate(
+        [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)]
+    ):
+        Person(_id=id, name=name, popularity=popularity).save()
+
+    # refresh index manually to make changes live
+    Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Jara Cimr", "tou", "de hen"):
+        s = Person.search()
+        s = s.suggest("auto_complete", text, completion={"field": "suggest"})
+        response = s.execute()
+
+        # print out all the options we got
+        for option in response.suggest["auto_complete"][0].options:
+            print("%10s: %25s (%d)" % (text, option._source.name, option._score))
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/composite_agg.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/composite_agg.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/composite_agg.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/composite_agg.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,90 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import os
+from typing import Any, Dict, Iterator, Mapping, Sequence, cast
+
+from elasticsearch.dsl import Agg, Response, Search, aggs, connections
+from elasticsearch.dsl.types import CompositeAggregate
+from elasticsearch.helpers import bulk
+from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX
+
+
+def scan_aggs(
+    search: Search,
+    source_aggs: Sequence[Mapping[str, Agg]],
+    inner_aggs: Dict[str, Agg] = {},
+    size: int = 10,
+) -> Iterator[CompositeAggregate]:
+    """
+    Helper function used to iterate over all possible bucket combinations of
+    ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
+    ``composite`` aggregation under the hood to perform this.
+    """
+
+    def run_search(**kwargs: Any) -> Response:
+        s = search[:0]
+        bucket = s.aggs.bucket(
+            "comp",
+            aggs.Composite(
+                sources=source_aggs,
+                size=size,
+                **kwargs,
+            ),
+        )
+        for agg_name, agg in inner_aggs.items():
+            bucket[agg_name] = agg
+        return s.execute()
+
+    response = run_search()
+    while response.aggregations["comp"].buckets:
+        for b in response.aggregations["comp"].buckets:
+            yield cast(CompositeAggregate, b)
+        if "after_key" in response.aggregations["comp"]:
+            after = response.aggregations["comp"].after_key
+        else:
+            after = response.aggregations["comp"].buckets[-1].key
+        response = run_search(after=after)
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    client = connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the index and populate it with some data
+    # note that the dataset is imported from the library's test suite
+    client.indices.delete(index="git", ignore_unavailable=True)
+    client.indices.create(index="git", **GIT_INDEX)
+    bulk(client, DATA, raise_on_error=True, refresh=True)
+
+    # run some aggregations on the data
+    for b in scan_aggs(
+        Search(index="git"),
+        [{"files": aggs.Terms(field="files")}],
+        {"first_seen": aggs.Min(field="committed_date")},
+    ):
+        print(
+            "File %s has been modified %d times, first seen at %s."
+            % (b.key.files, b.doc_count, b.first_seen.value_as_string)
+        )
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/esql_employees.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/esql_employees.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/esql_employees.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/esql_employees.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,169 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# ES|QL query builder example
+
+Requirements:
+
+$ pip install elasticsearch faker
+
+To run the example:
+
+$ python esql_employees.py "name to search"
+
+The index will be created automatically with a list of 1000 randomly generated
+employees if it does not exist. Add `--recreate-index` or `-r` to the command
+to regenerate it.
+
+Examples:
+
+$ python esql_employees "Mark"  # employees named Mark (first or last names)
+$ python esql_employees "Sarah" --limit 10  # up to 10 employees named Sarah
+$ python esql_employees "Sam" --sort height  # sort results by height
+$ python esql_employees "Sam" --sort name  # sort results by last name
+"""
+
+import argparse
+import os
+import random
+
+from faker import Faker
+
+from elasticsearch.dsl import Document, InnerDoc, M, connections
+from elasticsearch.esql import ESQLBase
+from elasticsearch.esql.functions import concat, multi_match
+
+fake = Faker()
+
+
+class Address(InnerDoc):
+    address: M[str]
+    city: M[str]
+    zip_code: M[str]
+
+
+class Employee(Document):
+    emp_no: M[int]
+    first_name: M[str]
+    last_name: M[str]
+    height: M[float]
+    still_hired: M[bool]
+    address: M[Address]
+
+    class Index:
+        name = "employees"
+
+    @property
+    def name(self) -> str:
+        return f"{self.first_name} {self.last_name}"
+
+    def __repr__(self) -> str:
+        return f"<Employee[{self.meta.id}]: {self.first_name} {self.last_name}>"
+
+
+def create(num_employees: int = 1000) -> None:
+    print("Creating a new employee index...")
+    if Employee._index.exists():
+        Employee._index.delete()
+    Employee.init()
+
+    for i in range(num_employees):
+        address = Address(
+            address=fake.address(), city=fake.city(), zip_code=fake.zipcode()
+        )
+        emp = Employee(
+            emp_no=10000 + i,
+            first_name=fake.first_name(),
+            last_name=fake.last_name(),
+            height=int((random.random() * 0.8 + 1.5) * 1000) / 1000,
+            still_hired=random.random() >= 0.5,
+            address=address,
+        )
+        emp.save()
+    Employee._index.refresh()
+
+
+def search(query: str, limit: int, sort: str) -> None:
+    q: ESQLBase = (
+        Employee.esql_from()
+        .where(multi_match(query, Employee.first_name, Employee.last_name))
+        .eval(full_name=concat(Employee.first_name, " ", Employee.last_name))
+    )
+    if sort == "height":
+        q = q.sort(Employee.height.desc())
+    elif sort == "name":
+        q = q.sort(Employee.last_name.asc())
+    q = q.limit(limit)
+    for result in Employee.esql_execute(q, return_additional=True):
+        assert type(result) == tuple
+        employee = result[0]
+        full_name = result[1]["full_name"]
+        print(
+            f"{full_name:<20}",
+            f"{'Hired' if employee.still_hired else 'Not hired':<10}",
+            f"{employee.height:5.2f}m",
+            f"{employee.address.city:<20}",
+        )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Employee ES|QL example")
+    parser.add_argument(
+        "--recreate-index",
+        "-r",
+        action="store_true",
+        help="Recreate and populate the index",
+    )
+    parser.add_argument(
+        "--limit",
+        action="store",
+        type=int,
+        default=100,
+        help="Maximum number or employees to return (default: 100)",
+    )
+    parser.add_argument(
+        "--sort",
+        action="store",
+        type=str,
+        default=None,
+        help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)',
+    )
+    parser.add_argument(
+        "query", action="store", help="The name or partial name to search for"
+    )
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not Employee._index.exists():
+        create()
+    Employee.init()
+
+    search(args.query, args.limit, args.sort)
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/parent_child.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/parent_child.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/parent_child.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/parent_child.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,275 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Complex data model example modeling stackoverflow-like data.
+
+It is used to showcase several key features of elasticsearch-dsl:
+
+    * Object and Nested fields: see the User and Comment classes and the fields
+      in which they are used
+
+        * the add_comment method is used to add comments
+
+    * Parent/Child relationship
+
+        * see the Join field on Post creating the relationship between Question
+          and Answer
+
+        * Meta.matches allows hits from the same index to be wrapped in the
+          proper classes
+
+        * to see how child objects are created, see Question.add_answer
+
+        * Question.search_answers shows how to query for the children of a
+          particular parent
+
+"""
+import os
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
+
+from elasticsearch.dsl import (
+    Date,
+    Document,
+    InnerDoc,
+    Join,
+    Keyword,
+    Long,
+    Search,
+    Text,
+    connections,
+    mapped_field,
+)
+
+
+class User(InnerDoc):
+    """
+    Class used to represent a denormalized user stored on other objects.
+    """
+
+    id: int = mapped_field(Long())
+    signed_up: Optional[datetime] = mapped_field(Date())
+    username: str = mapped_field(Text(fields={"keyword": Keyword()}))
+    email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+    location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()}))
+
+
+class Comment(InnerDoc):
+    """
+    Class wrapper for nested comment objects.
+    """
+
+    author: User
+    created: datetime
+    content: str
+
+
+class Post(Document):
+    """
+    Base class for Question and Answer containing the common fields.
+    """
+
+    author: User
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _routing: str = mapped_field(default=None)
+        _id: Optional[int] = mapped_field(default=None)
+
+    created: Optional[datetime] = mapped_field(default=None)
+    body: str = mapped_field(default="")
+    comments: List[Comment] = mapped_field(default_factory=list)
+    question_answer: Any = mapped_field(
+        Join(relations={"question": "answer"}), default_factory=dict
+    )
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        # Post is an abstract class, make sure it never gets used for
+        # deserialization
+        return False
+
+    class Index:
+        name = "test-qa-site"
+        settings = {
+            "number_of_shards": 1,
+            "number_of_replicas": 0,
+        }
+
+    def add_comment(
+        self,
+        user: User,
+        content: str,
+        created: Optional[datetime] = None,
+        commit: Optional[bool] = True,
+    ) -> Comment:
+        c = Comment(author=user, content=content, created=created or datetime.now())
+        self.comments.append(c)
+        if commit:
+            self.save()
+        return c
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # if there is no date, use now
+        if self.created is None:
+            self.created = datetime.now()
+        super().save(**kwargs)
+
+
+class Question(Post):
+    tags: List[str] = mapped_field(
+        default_factory=list
+    )  # .tags will return empty list if not present
+    title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="")
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Question class for parent documents"""
+        return bool(hit["_source"]["question_answer"] == "question")
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> Search:  # type: ignore[override]
+        return cls._index.search(**kwargs).filter("term", question_answer="question")
+
+    def add_answer(
+        self,
+        user: User,
+        body: str,
+        created: Optional[datetime] = None,
+        accepted: bool = False,
+        commit: Optional[bool] = True,
+    ) -> "Answer":
+        answer = Answer(
+            # required to make sure the answer is stored in the same shard
+            _routing=self.meta.id,
+            # set up the parent/child mapping
+            question_answer={"name": "answer", "parent": self.meta.id},
+            # pass in the field values
+            author=user,
+            created=created,
+            body=body,
+            is_accepted=accepted,
+        )
+        if commit:
+            answer.save()
+        return answer
+
+    def search_answers(self) -> Search:
+        # search only our index
+        s = Answer.search()
+        # filter for answers belonging to us
+        s = s.filter("parent_id", type="answer", id=self.meta.id)
+        # add routing to only go to specific shard
+        s = s.params(routing=self.meta.id)
+        return s
+
+    def get_answers(self) -> List[Any]:
+        """
+        Get answers either from inner_hits already present or by searching
+        elasticsearch.
+        """
+        if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
+            return cast(List[Any], self.meta.inner_hits["answer"].hits)
+        return [a for a in self.search_answers()]
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.question_answer = "question"
+        super().save(**kwargs)
+
+
+class Answer(Post):
+    is_accepted: bool = mapped_field(default=False)
+
+    @classmethod
+    def _matches(cls, hit: Dict[str, Any]) -> bool:
+        """Use Answer class for child documents with child name 'answer'"""
+        return (
+            isinstance(hit["_source"]["question_answer"], dict)
+            and hit["_source"]["question_answer"].get("name") == "answer"
+        )
+
+    @classmethod
+    def search(cls, **kwargs: Any) -> Search:  # type: ignore[override]
+        return cls._index.search(**kwargs).exclude("term", question_answer="question")
+
+    def get_question(self) -> Optional[Question]:
+        # cache question in self.meta
+        # any attributes set on self would be interpreted as fields
+        if "question" not in self.meta:
+            self.meta.question = Question.get(
+                id=self.question_answer.parent, index=self.meta.index
+            )
+        return cast(Optional[Question], self.meta.question)
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        # set routing to parents id automatically
+        self.meta.routing = self.question_answer.parent
+        super().save(**kwargs)
+
+
+def setup() -> None:
+    """Create an IndexTemplate and save it into elasticsearch."""
+    index_template = Post._index.as_composable_template("base", priority=100)
+    index_template.save()
+
+
+def main() -> Answer:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create index
+    setup()
+
+    # user objects to use
+    nick = User(
+        id=47,
+        signed_up=datetime(2017, 4, 3),
+        username="fxdgear",
+        email="nick.lang@elastic.co",
+        location="Colorado",
+    )
+    honza = User(
+        id=42,
+        signed_up=datetime(2013, 4, 3),
+        username="honzakral",
+        email="honza@elastic.co",
+        location="Prague",
+    )
+
+    # create a question object
+    question = Question(
+        _id=1,
+        author=nick,
+        tags=["elasticsearch", "python"],
+        title="How do I use elasticsearch from Python?",
+        body="""
+        I want to use elasticsearch, how do I do it from Python?
+        """,
+    )
+    question.save()
+    answer = question.add_answer(honza, "Just use `elasticsearch-py`!")
+
+    # close the connection
+    connections.get_connection().close()
+
+    return answer
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/percolate.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/percolate.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/percolate.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/percolate.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,116 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import os
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from elasticsearch.dsl import (
+    Document,
+    Keyword,
+    Percolator,
+    Q,
+    Query,
+    Search,
+    connections,
+    mapped_field,
+)
+
+
+class BlogPost(Document):
+    """
+    Blog posts that will be automatically tagged based on percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: int
+
+    content: Optional[str]
+    tags: List[str] = mapped_field(Keyword(), default_factory=list)
+
+    class Index:
+        name = "test-blogpost"
+
+    def add_tags(self) -> None:
+        # run a percolation to automatically tag the blog post.
+        s = Search(index="test-percolator")
+        s = s.query(
+            "percolate", field="query", index=self._get_index(), document=self.to_dict()
+        )
+
+        # collect all the tags from matched percolators
+        for percolator in s:
+            self.tags.extend(percolator.tags)
+
+        # make sure tags are unique
+        self.tags = list(set(self.tags))
+
+    def save(self, **kwargs: Any) -> None:  # type: ignore[override]
+        self.add_tags()
+        super().save(**kwargs)
+
+
+class PercolatorDoc(Document):
+    """
+    Document class used for storing the percolation queries.
+    """
+
+    if TYPE_CHECKING:
+        _id: str
+
+    # relevant fields from BlogPost must also be present here for the queries
+    # to be able to use them. Another option would be to use document
+    # inheritance but save() would have to be reset to normal behavior.
+    content: Optional[str]
+
+    # the percolator query to be run against the doc
+    query: Query = mapped_field(Percolator())
+    # list of tags to append to a document
+    tags: List[str] = mapped_field(Keyword(multi=True))
+
+    class Index:
+        name = "test-percolator"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
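+# For orientation (an illustrative sketch): setup() below registers a percolation
+# query document that is stored roughly as
+#
+#   {"query": {"match": {"content": "python"}},
+#    "tags": ["programming", "development", "python"],
+#    "content": ""}
+#
+# and BlogPost.add_tags() then runs a "percolate" query with the blog post's
+# source as the document, returning every stored query that matches it.
+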
+def setup() -> None:
+    # create the percolator index if it doesn't exist
+    if not PercolatorDoc._index.exists():
+        PercolatorDoc.init()
+
+    # register a percolation query looking for documents about python
+    PercolatorDoc(
+        _id="python",
+        tags=["programming", "development", "python"],
+        content="",
+        query=Q("match", content="python"),
+    ).save(refresh=True)
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    setup()
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/search_as_you_type.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/search_as_you_type.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/search_as_you_type.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/search_as_you_type.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,98 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+Example ``Document`` with search_as_you_type field datatype and how to search it.
+
+When a field is created with the search_as_you_type datatype, Elasticsearch creates
+additional subfields to enable efficient as-you-type completion, matching terms at
+any position within the input.
+
+A custom analyzer with ASCII folding can be added to make the search work across
+different languages.
+"""
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from elasticsearch.dsl import (
+    Document,
+    SearchAsYouType,
+    connections,
+    mapped_field,
+)
+from elasticsearch.dsl.query import MultiMatch
+
+
+class Person(Document):
+    if TYPE_CHECKING:
+        # definitions here help type checkers understand additional arguments
+        # that are allowed in the constructor
+        _id: Optional[int] = mapped_field(default=None)
+
+    name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="")
+
+    class Index:
+        name = "test-search-as-you-type"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+def main() -> None:
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    # create the empty index
+    Person.init()
+
+    import pprint
+
+    pprint.pprint(Person().to_dict(), indent=2)
+
+    # index some sample data
+    names = [
+        "Andy Warhol",
+        "Alphonse Mucha",
+        "Henri de Toulouse-Lautrec",
+        "Jára Cimrman",
+    ]
+    for id, name in enumerate(names):
+        Person(_id=id, name=name).save()
+
+    # refresh index manually to make changes live
+    Person._index.refresh()
+
+    # run some suggestions
+    for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"):
+        s = Person.search()
+
+        s.query = MultiMatch(  # type: ignore[assignment]
+            query=text,
+            type="bool_prefix",
+            fields=["name", "name._2gram", "name._3gram"],
+        )
+
+        response = s.execute()
+
+        # print out all the options we got
+        for h in response:
+            print("%15s: %25s" % (text, h.name))
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/semantic_text.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/semantic_text.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/semantic_text.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/semantic_text.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,147 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+
+"""
+# Semantic Text example
+
+Requirements:
+
+$ pip install elasticsearch tqdm
+
+Before running this example, an ELSER inference endpoint must be created in the
+Elasticsearch cluster. This can be done manually from Kibana, or with the
+following curl command from a terminal:
+
+curl -X PUT \
+  "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \
+  -H "Content-Type: application/json" \
+  -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}'
+
+To run the example:
+
+$ python semantic_text.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to the command to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python semantic_text.py "work from home"
+$ python semantic_text.py "vacation time"
+$ python semantic_text.py "can I bring a bird to work?"
+
+When the index is created, the inference service will split the documents into
+short passages, and for each passage a sparse embedding will be generated using
+Elastic's ELSER v2 model.
+"""
+
+import argparse
+import json
+import os
+from datetime import datetime
+from typing import Any, Optional
+from urllib.request import urlopen
+
+from tqdm import tqdm
+
+from elasticsearch import dsl
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+
+class WorkplaceDoc(dsl.Document):
+    class Index:
+        name = "workplace_documents_semantic"
+
+    name: str
+    summary: str
+    content: Any = dsl.mapped_field(
+        dsl.field.SemanticText(inference_id="my-elser-endpoint")
+    )
+    created: datetime
+    updated: Optional[datetime]
+    url: str = dsl.mapped_field(dsl.Keyword())
+    category: str = dsl.mapped_field(dsl.Keyword())
+
+
+def create() -> None:
+
+    # create the index
+    WorkplaceDoc._index.delete(ignore_unavailable=True)
+    WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        doc.save()
+
+    # refresh the index
+    WorkplaceDoc._index.refresh()
+
+
+def search(query: str) -> dsl.Search[WorkplaceDoc]:
+    search = WorkplaceDoc.search()
+    search = search[:5]
+    return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query))
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    dsl.connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not WorkplaceDoc._index.exists():
+        create()
+
+    results = search(args.query)
+
+    for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Content: {hit.content.text}")
+        print("--------------------\n")
+
+    # close the connection
+    dsl.connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/sparse_vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/sparse_vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/sparse_vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/sparse_vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,197 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Sparse vector database example
+
+Requirements:
+
+$ pip install nltk tqdm elasticsearch
+
+Before running this example, the ELSER v2 model must be downloaded and deployed
+to the Elasticsearch cluster, and an ingest pipeline must be defined. This can
+be done manually from Kibana, or with the following three curl commands from a
+terminal, adjusting the endpoint as needed:
+
+curl -X PUT \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \
+  -H "Content-Type: application/json" \
+  -d '{"input":{"field_names":["text_field"]}}'
+curl -X POST \
+  "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated"
+curl -X PUT \
+  "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \
+  -H "Content-Type: application/json" \
+  -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}'
+
+To run the example:
+
+$ python sparse_vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python sparse_vectors.py "work from home"
+$ python sparse_vectors.py "vacation time"
+$ python sparse_vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage a sparse embedding is generated using Elastic's ELSER v2 model.
+The documents that are returned as search results are those that have the
+highest scored passages. Add `--show-inner-hits` to the command to see
+individual passage results as well.
+"""
+
+import argparse
+import json
+import os
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+from urllib.request import urlopen
+
+import nltk
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    Document,
+    InnerDoc,
+    Keyword,
+    Q,
+    Search,
+    SparseVector,
+    connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+
+class Passage(InnerDoc):
+    content: Optional[str]
+    embedding: Dict[str, float] = mapped_field(SparseVector(), init=False)
+
+
+class WorkplaceDoc(Document):
+    class Index:
+        name = "workplace_documents_sparse"
+        settings = {"default_pipeline": "elser_ingest_pipeline"}
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword())
+    category: str = mapped_field(Keyword())
+    passages: List[Passage] = mapped_field(default=[])
+
+    _model: Any = None
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = nltk.sent_tokenize(self.content)
+
+        # save each passage as a nested document; the sparse embedding for each
+        # passage is generated by the ingest pipeline when the document is saved
+        for passage in passages:
+            self.passages.append(Passage(content=passage))
+
+
+def create() -> None:
+
+    # create the index
+    WorkplaceDoc._index.delete(ignore_unavailable=True)
+    WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        doc.save()
+
+
+def search(query: str) -> Search[WorkplaceDoc]:
+    return WorkplaceDoc.search()[:5].query(
+        "nested",
+        path="passages",
+        query=Q(
+            "text_expansion",
+            passages__content={
+                "model_id": ".elser_model_2",
+                "model_text": query,
+            },
+        ),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not WorkplaceDoc._index.exists():
+        create()
+
+    results = search(args.query)
+
+    for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/vectors.py 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/vectors.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_integration/test_examples/examples/vectors.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_integration/test_examples/examples/vectors.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,186 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+"""
+# Vector database example
+
+Requirements:
+
+$ pip install nltk sentence_transformers tqdm elasticsearch
+
+To run the example:
+
+$ python vectors.py "text to search"
+
+The index will be created automatically if it does not exist. Add
+`--recreate-index` to regenerate it.
+
+The example dataset includes a selection of workplace documents. The
+following are good example queries to try out with this dataset:
+
+$ python vectors.py "work from home"
+$ python vectors.py "vacation time"
+$ python vectors.py "can I bring a bird to work?"
+
+When the index is created, the documents are split into short passages, and for
+each passage an embedding is generated using the open source
+"all-MiniLM-L6-v2" model. The documents that are returned as search results are
+those that have the highest scored passages. Add `--show-inner-hits` to the
+command to see individual passage results as well.
+"""
+
+import argparse
+import json
+import os
+from datetime import datetime
+from typing import Any, List, Optional, cast
+from urllib.request import urlopen
+
+import nltk
+from sentence_transformers import SentenceTransformer
+from tqdm import tqdm
+
+from elasticsearch.dsl import (
+    DenseVector,
+    Document,
+    InnerDoc,
+    Keyword,
+    M,
+    Search,
+    connections,
+    mapped_field,
+)
+
+DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
+MODEL_NAME = "all-MiniLM-L6-v2"
+
+# initialize sentence tokenizer
+nltk.download("punkt_tab", quiet=True)
+
+# this will be the embedding model
+embedding_model: Any = None
+
+
+class Passage(InnerDoc):
+    content: str
+    embedding: List[float] = mapped_field(DenseVector())
+
+
+class WorkplaceDoc(Document):
+    class Index:
+        name = "workplace_documents"
+
+    name: str
+    summary: str
+    content: str
+    created: datetime
+    updated: Optional[datetime]
+    url: str = mapped_field(Keyword(required=True))
+    category: str = mapped_field(Keyword(required=True))
+    passages: M[List[Passage]] = mapped_field(default=[])
+
+    @classmethod
+    def get_embedding(cls, input: str) -> List[float]:
+        global embedding_model
+        if embedding_model is None:
+            embedding_model = SentenceTransformer(MODEL_NAME)
+        return cast(List[float], list(embedding_model.encode(input)))
+
+    def clean(self) -> None:
+        # split the content into sentences
+        passages = cast(List[str], nltk.sent_tokenize(self.content))
+
+        # generate an embedding for each passage and save it as a nested document
+        for passage in passages:
+            self.passages.append(
+                Passage(content=passage, embedding=self.get_embedding(passage))
+            )
+
+
+def create() -> None:
+    # create the index
+    WorkplaceDoc._index.delete(ignore_unavailable=True)
+    WorkplaceDoc.init()
+
+    # download the data
+    dataset = json.loads(urlopen(DATASET_URL).read())
+
+    # import the dataset
+    for data in tqdm(dataset, desc="Indexing documents..."):
+        doc = WorkplaceDoc(
+            name=data["name"],
+            summary=data["summary"],
+            content=data["content"],
+            created=data.get("created_on"),
+            updated=data.get("updated_at"),
+            url=data["url"],
+            category=data["category"],
+        )
+        doc.save()
+
+
+def search(query: str) -> Search[WorkplaceDoc]:
+    return WorkplaceDoc.search().knn(
+        field=WorkplaceDoc.passages.embedding,
+        k=5,
+        num_candidates=50,
+        query_vector=list(WorkplaceDoc.get_embedding(query)),
+        inner_hits={"size": 2},
+    )
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
+    parser.add_argument(
+        "--recreate-index", action="store_true", help="Recreate and populate the index"
+    )
+    parser.add_argument(
+        "--show-inner-hits",
+        action="store_true",
+        help="Show results for individual passages",
+    )
+    parser.add_argument("query", action="store", help="The search query")
+    return parser.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    # initiate the default connection to elasticsearch
+    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    if args.recreate_index or not WorkplaceDoc._index.exists():
+        create()
+
+    results = search(args.query)
+
+    for hit in results:
+        print(
+            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
+        )
+        print(f"Summary: {hit.summary}")
+        if args.show_inner_hits:
+            for passage in hit.meta.inner_hits["passages"]:
+                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
+        print("")
+
+    # close the connection
+    connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    main()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_package.py 9.2.0-1/test_elasticsearch/test_dsl/test_package.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_package.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_package.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,22 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import elasticsearch.dsl
+
+
+def test__all__is_sorted() -> None:
+    assert elasticsearch.dsl.__all__ == sorted(elasticsearch.dsl.__all__)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_query.py 9.2.0-1/test_elasticsearch/test_dsl/test_query.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_query.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,671 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from pytest import raises
+
+from elasticsearch.dsl import function, query, utils
+
+
+def test_empty_Q_is_match_all() -> None:
+    q = query.Q()
+
+    assert isinstance(q, query.MatchAll)
+    assert query.MatchAll() == q
+
+
+def test_combined_fields_to_dict() -> None:
+    assert {
+        "combined_fields": {
+            "query": "this is a test",
+            "fields": ["name", "body", "description"],
+            "operator": "and",
+        },
+    } == query.CombinedFields(
+        query="this is a test",
+        fields=["name", "body", "description"],
+        operator="and",
+    ).to_dict()
+
+
+def test_combined_fields_to_dict_extra() -> None:
+    assert {
+        "combined_fields": {
+            "query": "this is a test",
+            "fields": ["name", "body^2"],
+            "operator": "or",
+        },
+    } == query.CombinedFields(
+        query="this is a test",
+        fields=["name", "body^2"],
+        operator="or",
+    ).to_dict()
+
+
+def test_match_to_dict() -> None:
+    assert {"match": {"f": "value"}} == query.Match(f="value").to_dict()
+
+
+def test_match_to_dict_extra() -> None:
+    assert {"match": {"f": "value", "boost": 2}} == query.Match(
+        f="value", boost=2
+    ).to_dict()
+
+
+def test_fuzzy_to_dict() -> None:
+    assert {"fuzzy": {"f": "value"}} == query.Fuzzy(f="value").to_dict()
+
+
+def test_prefix_to_dict() -> None:
+    assert {"prefix": {"f": "value"}} == query.Prefix(f="value").to_dict()
+
+
+def test_term_to_dict() -> None:
+    assert {"term": {"_type": "article"}} == query.Term(_type="article").to_dict()
+
+
+def test_terms_to_dict() -> None:
+    assert {"terms": {"_type": ["article", "section"]}} == query.Terms(
+        _type=["article", "section"]
+    ).to_dict()
+    assert {"terms": {"_type": ["article", "section"], "boost": 1.1}} == query.Terms(
+        _type=("article", "section"), boost=1.1
+    ).to_dict()
+    assert {"terms": {"_type": "article", "boost": 1.1}} == query.Terms(
+        _type="article", boost=1.1
+    ).to_dict()
+    assert {
+        "terms": {"_id": {"index": "my-other-index", "id": "my-id"}, "boost": 1.1}
+    } == query.Terms(
+        _id={"index": "my-other-index", "id": "my-id"}, boost=1.1
+    ).to_dict()
+
+
+def test_bool_to_dict() -> None:
+    bool = query.Bool(must=[query.Match(f="value")], should=[])
+
+    assert {"bool": {"must": [{"match": {"f": "value"}}]}} == bool.to_dict()
+
+
+def test_dismax_to_dict() -> None:
+    assert {"dis_max": {"queries": [{"term": {"_type": "article"}}]}} == query.DisMax(
+        queries=[query.Term(_type="article")]
+    ).to_dict()
+
+
+def test_bool_from_dict_issue_318() -> None:
+    d = {"bool": {"must_not": {"match": {"field": "value"}}}}
+    q = query.Q(d)
+
+    assert q == ~query.Match(field="value")
+
+
+def test_repr() -> None:
+    bool = query.Bool(must=[query.Match(f="value")], should=[])
+
+    assert "Bool(must=[Match(f='value')])" == repr(bool)
+
+
+def test_query_clone() -> None:
+    bool = query.Bool(
+        must=[query.Match(x=42)],
+        should=[query.Match(g="v2")],
+        must_not=[query.Match(title="value")],
+    )
+    bool_clone = bool._clone()
+
+    assert bool == bool_clone
+    assert bool is not bool_clone
+
+
+def test_bool_converts_its_init_args_to_queries() -> None:
+    q = query.Bool(must=[{"match": {"f": "value"}}])  # type: ignore
+
+    assert len(q.must) == 1
+    assert q.must[0] == query.Match(f="value")
+
+
+def test_two_queries_make_a_bool() -> None:
+    q1 = query.Match(f="value1")
+    q2 = query.Match(message={"query": "this is a test", "operator": "and"})
+    q = q1 & q2
+
+    assert isinstance(q, query.Bool)
+    assert [q1, q2] == q.must
+
+
+def test_other_and_bool_appends_other_to_must() -> None:
+    q1 = query.Match(f="value1")
+    qb = query.Bool()
+
+    q = q1 & qb
+    assert q is not qb
+    assert q.must[0] == q1
+
+
+def test_bool_and_other_appends_other_to_must() -> None:
+    q1 = query.Match(f="value1")
+    qb = query.Bool()
+
+    q = qb & q1
+    assert q is not qb
+    assert q.must[0] == q1
+
+
+def test_bool_and_other_sets_min_should_match_if_needed() -> None:
+    q1 = query.Q("term", category=1)
+    q2 = query.Q(
+        "bool", should=[query.Q("term", name="aaa"), query.Q("term", name="bbb")]
+    )
+
+    q = q1 & q2
+    assert q == query.Bool(
+        must=[q1],
+        should=[query.Q("term", name="aaa"), query.Q("term", name="bbb")],
+        minimum_should_match=1,
+    )
+
+
+def test_bool_with_different_minimum_should_match_should_not_be_combined() -> None:
+    q1 = query.Q(
+        "bool",
+        minimum_should_match=2,
+        should=[
+            query.Q("term", field="aa1"),
+            query.Q("term", field="aa2"),
+            query.Q("term", field="aa3"),
+            query.Q("term", field="aa4"),
+        ],
+    )
+    q2 = query.Q(
+        "bool",
+        minimum_should_match=3,
+        should=[
+            query.Q("term", field="bb1"),
+            query.Q("term", field="bb2"),
+            query.Q("term", field="bb3"),
+            query.Q("term", field="bb4"),
+        ],
+    )
+    q3 = query.Q(
+        "bool",
+        minimum_should_match=4,
+        should=[
+            query.Q("term", field="cc1"),
+            query.Q("term", field="cc2"),
+            query.Q("term", field="cc3"),
+            query.Q("term", field="cc4"),
+        ],
+    )
+
+    q4 = q1 | q2
+    assert q4 == query.Bool(should=[q1, q2])
+
+    q5 = q1 | q2 | q3
+    assert q5 == query.Bool(should=[q1, q2, q3])
+
+
+def test_empty_bool_has_min_should_match_0() -> None:
+    assert 0 == query.Bool()._min_should_match
+
+
+def test_query_and_query_creates_bool() -> None:
+    q1 = query.Match(f=42)
+    q2 = query.Match(g=47)
+
+    q = q1 & q2
+    assert isinstance(q, query.Bool)
+    assert q.must == [q1, q2]
+
+
+def test_match_all_and_query_equals_other() -> None:
+    q1 = query.Match(f=42)
+    q2 = query.MatchAll()
+
+    q = q1 & q2
+    assert q1 == q
+
+
+def test_not_match_all_is_match_none() -> None:
+    q = query.MatchAll()
+
+    assert ~q == query.MatchNone()
+
+
+def test_not_match_none_is_match_all() -> None:
+    q = query.MatchNone()
+
+    assert ~q == query.MatchAll()
+
+
+def test_invert_empty_bool_is_match_none() -> None:
+    q = query.Bool()
+
+    assert ~q == query.MatchNone()
+
+
+def test_match_none_or_query_equals_query() -> None:
+    q1 = query.Match(f=42)
+    q2 = query.MatchNone()
+
+    assert q1 | q2 == query.Match(f=42)
+
+
+def test_match_none_and_query_equals_match_none() -> None:
+    q1 = query.Match(f=42)
+    q2 = query.MatchNone()
+
+    assert q1 & q2 == query.MatchNone()
+
+
+def test_bool_and_bool() -> None:
+    qt1, qt2, qt3 = query.Match(f=1), query.Match(f=2), query.Match(f=3)
+
+    q1 = query.Bool(must=[qt1], should=[qt2])
+    q2 = query.Bool(must_not=[qt3])
+    assert q1 & q2 == query.Bool(
+        must=[qt1], must_not=[qt3], should=[qt2], minimum_should_match=0
+    )
+
+    q1 = query.Bool(must=[qt1], should=[qt1, qt2])
+    q2 = query.Bool(should=[qt3])
+    assert q1 & q2 == query.Bool(
+        must=[qt1, qt3], should=[qt1, qt2], minimum_should_match=0
+    )
+
+
+def test_bool_and_bool_with_min_should_match() -> None:
+    qt1, qt2 = query.Match(f=1), query.Match(f=2)
+    q1 = query.Q("bool", minimum_should_match=1, should=[qt1])
+    q2 = query.Q("bool", minimum_should_match=1, should=[qt2])
+
+    assert query.Q("bool", must=[qt1, qt2]) == q1 & q2
+
+
+def test_negative_min_should_match() -> None:
+    qt1, qt2 = query.Match(f=1), query.Match(f=2)
+    q1 = query.Q("bool", minimum_should_match=-2, should=[qt1])
+    q2 = query.Q("bool", minimum_should_match=1, should=[qt2])
+
+    with raises(ValueError):
+        q1 & q2
+    with raises(ValueError):
+        q2 & q1
+
+
+def test_percentage_min_should_match() -> None:
+    qt1, qt2 = query.Match(f=1), query.Match(f=2)
+    q1 = query.Q("bool", minimum_should_match="50%", should=[qt1])
+    q2 = query.Q("bool", minimum_should_match=1, should=[qt2])
+
+    with raises(ValueError):
+        q1 & q2
+    with raises(ValueError):
+        q2 & q1
+
+
+def test_inverted_query_becomes_bool_with_must_not() -> None:
+    q = query.Match(f=42)
+
+    assert ~q == query.Bool(must_not=[query.Match(f=42)])
+
+
+def test_inverted_query_with_must_not_become_should() -> None:
+    q = query.Q("bool", must_not=[query.Q("match", f=1), query.Q("match", f=2)])
+
+    assert ~q == query.Q("bool", should=[query.Q("match", f=1), query.Q("match", f=2)])
+
+
+def test_inverted_query_with_must_and_must_not() -> None:
+    q = query.Q(
+        "bool",
+        must=[query.Q("match", f=3), query.Q("match", f=4)],
+        must_not=[query.Q("match", f=1), query.Q("match", f=2)],
+    )
+    print((~q).to_dict())
+    assert ~q == query.Q(
+        "bool",
+        should=[
+            # negation of must
+            query.Q("bool", must_not=[query.Q("match", f=3)]),
+            query.Q("bool", must_not=[query.Q("match", f=4)]),
+            # negation of must_not
+            query.Q("match", f=1),
+            query.Q("match", f=2),
+        ],
+    )
+
+
+def test_double_invert_returns_original_query() -> None:
+    q = query.Match(f=42)
+
+    assert q == ~~q
+
+
+def test_bool_query_gets_inverted_internally() -> None:
+    q = query.Bool(must_not=[query.Match(f=42)], must=[query.Match(g="v")])
+
+    assert ~q == query.Bool(
+        should=[
+            # negating must
+            query.Bool(must_not=[query.Match(g="v")]),
+            # negating must_not
+            query.Match(f=42),
+        ]
+    )
+
+
+def test_match_all_or_something_is_match_all() -> None:
+    q1 = query.MatchAll()
+    q2 = query.Match(f=42)
+
+    assert (q1 | q2) == query.MatchAll()
+    assert (q2 | q1) == query.MatchAll()
+
+
+def test_or_produces_bool_with_should() -> None:
+    q1 = query.Match(f=42)
+    q2 = query.Match(g="v")
+
+    q = q1 | q2
+    assert q == query.Bool(should=[q1, q2])
+
+
+def test_or_bool_doesnt_loop_infinitely_issue_37() -> None:
+    q = query.Match(f=42) | ~query.Match(f=47)
+
+    assert q == query.Bool(
+        should=[query.Bool(must_not=[query.Match(f=47)]), query.Match(f=42)]
+    )
+
+
+def test_or_bool_doesnt_loop_infinitely_issue_96() -> None:
+    q = ~query.Match(f=42) | ~query.Match(f=47)
+
+    assert q == query.Bool(
+        should=[
+            query.Bool(must_not=[query.Match(f=42)]),
+            query.Bool(must_not=[query.Match(f=47)]),
+        ]
+    )
+
+
+def test_bool_will_append_another_query_with_or() -> None:
+    qb = query.Bool(should=[query.Match(f="v"), query.Match(f="v2")])
+    q = query.Match(g=42)
+
+    assert (q | qb) == query.Bool(should=[query.Match(f="v"), query.Match(f="v2"), q])
+
+
+def test_bool_queries_with_only_should_get_concatenated() -> None:
+    q1 = query.Bool(should=[query.Match(f=1), query.Match(f=2)])
+    q2 = query.Bool(should=[query.Match(f=3), query.Match(f=4)])
+
+    assert (q1 | q2) == query.Bool(
+        should=[query.Match(f=1), query.Match(f=2), query.Match(f=3), query.Match(f=4)]
+    )
+
+
+def test_two_bool_queries_append_one_to_should_if_possible() -> None:
+    q1 = query.Bool(should=[query.Match(f="v")])
+    q2 = query.Bool(must=[query.Match(f="v")])
+
+    assert (q1 | q2) == query.Bool(
+        should=[query.Match(f="v"), query.Bool(must=[query.Match(f="v")])]
+    )
+    assert (q2 | q1) == query.Bool(
+        should=[query.Match(f="v"), query.Bool(must=[query.Match(f="v")])]
+    )
+
+
+def test_queries_are_registered() -> None:
+    assert "match" in query.Query._classes
+    assert query.Query._classes["match"] is query.Match
+
+
+def test_defining_query_registers_it() -> None:
+    class MyQuery(query.Query):
+        name = "my_query"
+
+    assert "my_query" in query.Query._classes
+    assert query.Query._classes["my_query"] is MyQuery
+
+
+def test_Q_passes_query_through() -> None:
+    q = query.Match(f="value1")
+
+    assert query.Q(q) is q
+
+
+def test_Q_constructs_query_by_name() -> None:
+    q = query.Q("match", f="value")
+
+    assert isinstance(q, query.Match)
+    assert {"f": "value"} == q._params
+
+
+def test_Q_translates_double_underscore_to_dots_in_param_names() -> None:
+    q = query.Q("match", comment__author="honza")
+
+    assert {"comment.author": "honza"} == q._params
+
+
+def test_Q_doesn_translate_double_underscore_to_dots_in_param_names() -> None:
+    q = query.Q("match", comment__author="honza", _expand__to_dot=False)
+
+    assert {"comment__author": "honza"} == q._params
+
+
+def test_Q_constructs_simple_query_from_dict() -> None:
+    q = query.Q({"match": {"f": "value"}})
+
+    assert isinstance(q, query.Match)
+    assert {"f": "value"} == q._params
+
+
+def test_Q_constructs_compound_query_from_dict() -> None:
+    q = query.Q({"bool": {"must": [{"match": {"f": "value"}}]}})
+
+    assert q == query.Bool(must=[query.Match(f="value")])
+
+
+def test_Q_raises_error_when_passed_in_dict_and_params() -> None:
+    with raises(Exception):
+        # Ignore types as it's not a valid call
+        query.Q({"match": {"f": "value"}}, f="value")  # type: ignore[call-overload]
+
+
+def test_Q_raises_error_when_passed_in_query_and_params() -> None:
+    q = query.Match(f="value1")
+
+    with raises(Exception):
+        # Ignore types as it's not a valid call signature
+        query.Q(q, f="value")  # type: ignore[call-overload]
+
+
+def test_Q_raises_error_on_unknown_query() -> None:
+    with raises(Exception):
+        query.Q("not a query", f="value")
+
+
+def test_match_all_and_anything_is_anything() -> None:
+    q = query.MatchAll()
+
+    s = query.Match(f=42)
+    assert q & s == s
+    assert s & q == s
+
+
+def test_function_score_with_functions() -> None:
+    q = query.Q(
+        "function_score",
+        functions=[query.SF("script_score", script="doc['comment_count'] * _score")],
+    )
+
+    assert {
+        "function_score": {
+            "functions": [{"script_score": {"script": "doc['comment_count'] * _score"}}]
+        }
+    } == q.to_dict()
+
+
+def test_function_score_with_no_function_is_boost_factor() -> None:
+    q = query.Q(
+        "function_score",
+        functions=[query.SF({"weight": 20, "filter": query.Q("term", f=42)})],
+    )
+
+    assert {
+        "function_score": {"functions": [{"filter": {"term": {"f": 42}}, "weight": 20}]}
+    } == q.to_dict()
+
+
+def test_function_score_to_dict() -> None:
+    q = query.Q(
+        "function_score",
+        query=query.Q("match", title="python"),
+        functions=[
+            query.SF("random_score"),
+            query.SF(
+                "field_value_factor",
+                field="comment_count",
+                filter=query.Q("term", tags="python"),
+            ),
+        ],
+    )
+
+    d = {
+        "function_score": {
+            "query": {"match": {"title": "python"}},
+            "functions": [
+                {"random_score": {}},
+                {
+                    "filter": {"term": {"tags": "python"}},
+                    "field_value_factor": {"field": "comment_count"},
+                },
+            ],
+        }
+    }
+    assert d == q.to_dict()
+
+
+def test_function_score_class_based_to_dict() -> None:
+    q = query.FunctionScore(
+        query=query.Match(title="python"),
+        functions=[
+            function.RandomScore(),
+            function.FieldValueFactor(
+                field="comment_count",
+                filter=query.Term(tags="python"),
+            ),
+        ],
+    )
+
+    d = {
+        "function_score": {
+            "query": {"match": {"title": "python"}},
+            "functions": [
+                {"random_score": {}},
+                {
+                    "filter": {"term": {"tags": "python"}},
+                    "field_value_factor": {"field": "comment_count"},
+                },
+            ],
+        }
+    }
+    assert d == q.to_dict()
+
+
+def test_function_score_with_single_function() -> None:
+    d = {
+        "function_score": {
+            "filter": {"term": {"tags": "python"}},
+            "script_score": {"script": "doc['comment_count'] * _score"},
+        }
+    }
+
+    q = query.Q(d)
+    assert isinstance(q, query.FunctionScore)
+    assert isinstance(q.filter, query.Term)
+    assert len(q.functions) == 1
+
+    sf = q.functions[0]
+    assert isinstance(sf, function.ScriptScore)
+    assert "doc['comment_count'] * _score" == sf.script
+
+
+def test_function_score_from_dict() -> None:
+    d = {
+        "function_score": {
+            "filter": {"term": {"tags": "python"}},
+            "functions": [
+                {
+                    "filter": {"terms": {"tags": "python"}},
+                    "script_score": {"script": "doc['comment_count'] * _score"},
+                },
+                {"boost_factor": 6},
+            ],
+        }
+    }
+
+    q = query.Q(d)
+    assert isinstance(q, query.FunctionScore)
+    assert isinstance(q.filter, query.Term)
+    assert len(q.functions) == 2
+
+    sf = q.functions[0]
+    assert isinstance(sf, function.ScriptScore)
+    assert isinstance(sf.filter, query.Terms)
+
+    sf = q.functions[1]
+    assert isinstance(sf, function.BoostFactor)
+    assert 6 == sf.value
+    assert {"boost_factor": 6} == sf.to_dict()
+
+
+def test_script_score() -> None:
+    d = {
+        "script_score": {
+            "query": {"match_all": {}},
+            "script": {"source": "...", "params": {}},
+        }
+    }
+    q = query.Q(d)
+
+    assert isinstance(q, query.ScriptScore)
+    assert isinstance(q.query, query.MatchAll)
+    assert q.script == {"source": "...", "params": {}}
+    assert q.to_dict() == d
+
+
+def test_expand_double_underscore_to_dot_setting() -> None:
+    q = query.Term(comment__count=2)
+    assert q.to_dict() == {"term": {"comment.count": 2}}
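+    # With the module-level flag turned off, double underscores stay literal; the
+    # flag is restored below so other tests keep the default dot expansion.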
+    utils.EXPAND__TO_DOT = False
+    q = query.Term(comment__count=2)
+    assert q.to_dict() == {"term": {"comment__count": 2}}
+    utils.EXPAND__TO_DOT = True
+
+
+def test_knn_query() -> None:
+    q = query.Knn(field="image-vector", query_vector=[-5, 9, -12], num_candidates=10)
+    assert q.to_dict() == {
+        "knn": {
+            "field": "image-vector",
+            "query_vector": [-5, 9, -12],
+            "num_candidates": 10,
+        }
+    }
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_result.py 9.2.0-1/test_elasticsearch/test_dsl/test_result.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_result.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_result.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,215 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pickle
+from datetime import date
+from typing import Any, Dict
+
+from pytest import fixture, raises
+
+from elasticsearch.dsl import Date, Document, Object, Search, response
+from elasticsearch.dsl.aggs import Terms
+from elasticsearch.dsl.response.aggs import AggResponse, Bucket, BucketData
+from elasticsearch.dsl.utils import AttrDict
+
+
+@fixture
+def agg_response(aggs_search: Search, aggs_data: Dict[str, Any]) -> response.Response:
+    return response.Response(aggs_search, aggs_data)
+
+
+def test_agg_response_is_pickleable(agg_response: response.Response) -> None:
+    agg_response.hits
+    r = pickle.loads(pickle.dumps(agg_response))
+
+    assert r == agg_response
+    assert r._search == agg_response._search
+    assert r.hits == agg_response.hits
+
+
+def test_response_is_pickleable(dummy_response: Dict[str, Any]) -> None:
+    res = response.Response(Search(), dummy_response.body)  # type: ignore[attr-defined]
+    res.hits
+    r = pickle.loads(pickle.dumps(res))
+
+    assert r == res
+    assert r._search == res._search
+    assert r.hits == res.hits
+
+
+def test_hit_is_pickleable(dummy_response: Dict[str, Any]) -> None:
+    res = response.Response(Search(), dummy_response)
+    hits = pickle.loads(pickle.dumps(res.hits))
+
+    assert hits == res.hits
+    assert hits[0].meta == res.hits[0].meta
+
+
+def test_response_stores_search(dummy_response: Dict[str, Any]) -> None:
+    s = Search()
+    r = response.Response(s, dummy_response)
+
+    assert r._search is s
+
+
+def test_attribute_error_in_hits_is_not_hidden(dummy_response: Dict[str, Any]) -> None:
+    def f(hit: AttrDict[Any]) -> Any:
+        raise AttributeError()
+
+    s = Search().doc_type(employee=f)
+    r = response.Response(s, dummy_response)
+    with raises(TypeError):
+        r.hits
+
+
+def test_interactive_helpers(dummy_response: Dict[str, Any]) -> None:
+    res = response.Response(Search(), dummy_response)
+    hits = res.hits
+    h = hits[0]
+
+    rhits = (
+        "[<Hit(test-index/elasticsearch): {}>, <Hit(test-index/42): {}...}}>, "
+        "<Hit(test-index/47): {}...}}>, <Hit(test-index/53): {{}}>]"
+    ).format(
+        repr(dummy_response["hits"]["hits"][0]["_source"]),
+        repr(dummy_response["hits"]["hits"][1]["_source"])[:60],
+        repr(dummy_response["hits"]["hits"][2]["_source"])[:60],
+    )
+
+    assert res
+    assert f"<Response: {rhits}>" == repr(res)
+    assert rhits == repr(hits)
+    assert {"meta", "city", "name"} == set(dir(h))
+    assert "<Hit(test-index/elasticsearch): %r>" % dummy_response["hits"]["hits"][0][
+        "_source"
+    ] == repr(h)
+
+
+def test_empty_response_is_false(dummy_response: Dict[str, Any]) -> None:
+    dummy_response["hits"]["hits"] = []
+    res = response.Response(Search(), dummy_response)
+
+    assert not res
+
+
+def test_len_response(dummy_response: Dict[str, Any]) -> None:
+    res = response.Response(Search(), dummy_response)
+    assert len(res) == 4
+
+
+def test_iterating_over_response_gives_you_hits(dummy_response: Dict[str, Any]) -> None:
+    res = response.Response(Search(), dummy_response)
+    hits = list(h for h in res)
+
+    assert res.success()
+    assert 123 == res.took
+    assert 4 == len(hits)
+    assert all(isinstance(h, response.Hit) for h in hits)
+    h = hits[0]
+
+    assert "test-index" == h.meta.index
+    assert "company" == h.meta.doc_type
+    assert "elasticsearch" == h.meta.id
+    assert 12 == h.meta.score
+
+    assert hits[1].meta.routing == "elasticsearch"
+
+
+def test_hits_get_wrapped_to_contain_additional_attrs(
+    dummy_response: Dict[str, Any],
+) -> None:
+    res = response.Response(Search(), dummy_response)
+    hits = res.hits
+
+    assert 123 == hits.total  # type: ignore[attr-defined]
+    assert 12.0 == hits.max_score  # type: ignore[attr-defined]
+
+
+def test_hits_provide_dot_and_bracket_access_to_attrs(
+    dummy_response: Dict[str, Any],
+) -> None:
+    res = response.Response(Search(), dummy_response)
+    h = res.hits[0]
+
+    assert "Elasticsearch" == h.name
+    assert "Elasticsearch" == h["name"]
+
+    assert "Honza" == res.hits[2].name.first
+
+    with raises(KeyError):
+        h["not_there"]
+
+    with raises(AttributeError):
+        h.not_there
+
+
+def test_slicing_on_response_slices_on_hits(dummy_response: Dict[str, Any]) -> None:
+    res = response.Response(Search(), dummy_response)
+
+    assert res[0] is res.hits[0]
+    assert res[::-1] == res.hits[::-1]
+
+
+def test_aggregation_base(agg_response: response.Response) -> None:
+    assert agg_response.aggs is agg_response.aggregations
+    assert isinstance(agg_response.aggs, response.AggResponse)
+
+
+def test_metric_agg_works(agg_response: response.Response) -> None:
+    assert 25052.0 == agg_response.aggs.sum_lines.value
+
+
+def test_aggregations_can_be_iterated_over(agg_response: response.Response) -> None:
+    aggs = [a for a in agg_response.aggs]
+
+    assert len(aggs) == 3
+    assert all(map(lambda a: isinstance(a, AggResponse), aggs))
+
+
+def test_aggregations_can_be_retrieved_by_name(
+    agg_response: response.Response, aggs_search: Search
+) -> None:
+    a = agg_response.aggs["popular_files"]
+
+    assert isinstance(a, BucketData)
+    assert isinstance(a._meta["aggs"], Terms)
+    assert a._meta["aggs"] is aggs_search.aggs.aggs["popular_files"]
+
+
+def test_bucket_response_can_be_iterated_over(agg_response: response.Response) -> None:
+    popular_files = agg_response.aggregations.popular_files
+
+    buckets = [b for b in popular_files]
+    assert all(isinstance(b, Bucket) for b in buckets)
+    assert buckets == popular_files.buckets
+
+
+def test_bucket_keys_get_deserialized(
+    aggs_data: Dict[str, Any], aggs_search: Search
+) -> None:
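+    # Binding a Document whose mapping declares committed_date as a Date lets the
+    # per_month bucket keys be deserialized into date objects (asserted below).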
+    class Commit(Document):
+        info = Object(properties={"committed_date": Date()})
+
+        class Index:
+            name = "test-commit"
+
+    aggs_search = aggs_search.doc_type(Commit)
+    agg_response = response.Response(aggs_search, aggs_data)
+
+    per_month = agg_response.aggregations.per_month
+    for b in per_month:
+        assert isinstance(b.key, date)
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_utils.py 9.2.0-1/test_elasticsearch/test_dsl/test_utils.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_utils.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_utils.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,136 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import pickle
+from typing import Any, Dict, Tuple
+
+from pytest import raises
+
+from elasticsearch.dsl import Q, serializer, utils
+
+
+def test_attrdict_pickle() -> None:
+    ad: utils.AttrDict[str] = utils.AttrDict({})
+
+    pickled_ad = pickle.dumps(ad)
+    assert ad == pickle.loads(pickled_ad)
+
+
+def test_attrlist_pickle() -> None:
+    al = utils.AttrList[Any]([])
+
+    pickled_al = pickle.dumps(al)
+    assert al == pickle.loads(pickled_al)
+
+
+def test_attrlist_slice() -> None:
+    class MyAttrDict(utils.AttrDict[str]):
+        pass
+
+    l = utils.AttrList[Any]([{}, {}], obj_wrapper=MyAttrDict)
+    assert isinstance(l[:][0], MyAttrDict)
+
+
+def test_attrlist_with_type_argument() -> None:
+    a = utils.AttrList[str](["a", "b"])
+    assert list(a) == ["a", "b"]
+
+
+def test_attrdict_keys_items() -> None:
+    a = utils.AttrDict({"a": {"b": 42, "c": 47}, "d": "e"})
+    assert list(a.keys()) == ["a", "d"]
+    assert list(a.items()) == [("a", {"b": 42, "c": 47}), ("d", "e")]
+
+
+def test_attrdict_with_type_argument() -> None:
+    a = utils.AttrDict[str]({"a": "b"})
+    assert list(a.keys()) == ["a"]
+    assert list(a.items()) == [("a", "b")]
+
+
+def test_merge() -> None:
+    a: utils.AttrDict[Any] = utils.AttrDict({"a": {"b": 42, "c": 47}})
+    b = {"a": {"b": 123, "d": -12}, "e": [1, 2, 3]}
+
+    utils.merge(a, b)
+
+    assert a == {"a": {"b": 123, "c": 47, "d": -12}, "e": [1, 2, 3]}
+
+
+def test_merge_conflict() -> None:
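+    # merge() does not raise on conflicting leaves by default; passing True as the
+    # third argument makes the conflict an error.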
+    data: Tuple[Dict[str, Any], ...] = (
+        {"a": 42},
+        {"a": {"b": 47}},
+    )
+    for d in data:
+        utils.merge({"a": {"b": 42}}, d)
+        with raises(ValueError):
+            utils.merge({"a": {"b": 42}}, d, True)
+
+
+def test_attrdict_bool() -> None:
+    d: utils.AttrDict[str] = utils.AttrDict({})
+
+    assert not d
+    d.title = "Title"
+    assert d
+
+
+def test_attrlist_items_get_wrapped_during_iteration() -> None:
+    al = utils.AttrList([1, object(), [1], {}])
+
+    l = list(iter(al))
+
+    assert isinstance(l[2], utils.AttrList)
+    assert isinstance(l[3], utils.AttrDict)
+
+
+def test_serializer_deals_with_Attr_versions() -> None:
+    d = utils.AttrDict({"key": utils.AttrList([1, 2, 3])})
+
+    assert serializer.serializer.dumps(d) == serializer.serializer.dumps(
+        {"key": [1, 2, 3]}
+    )
+
+
+def test_serializer_deals_with_objects_with_to_dict() -> None:
+    class MyClass:
+        def to_dict(self) -> int:
+            return 42
+
+    assert serializer.serializer.dumps(MyClass()) == b"42"
+
+
+def test_recursive_to_dict() -> None:
+    assert utils.recursive_to_dict({"k": [1, (1.0, {"v": Q("match", key="val")})]}) == {
+        "k": [1, (1.0, {"v": {"match": {"key": "val"}}})]
+    }
+
+
+def test_attrlist_to_list() -> None:
+    l = utils.AttrList[Any]([{}, {}]).to_list()
+    assert isinstance(l, list)
+    assert l == [{}, {}]
+
+
+def test_attrdict_with_reserved_keyword() -> None:
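+    # "from" is a Python keyword, so AttrDict exposes it as "from_" while to_dict()
+    # still writes it back out as "from".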
+    d = utils.AttrDict({"from": 10, "size": 20})
+    assert d.from_ == 10
+    assert d.size == 20
+    d = utils.AttrDict({})
+    d.from_ = 10
+    assert {"from": 10} == d.to_dict()
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_validation.py 9.2.0-1/test_elasticsearch/test_dsl/test_validation.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_validation.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_validation.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,162 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime
+from typing import Any
+
+from pytest import raises
+
+from elasticsearch.dsl import (
+    Date,
+    Document,
+    InnerDoc,
+    Integer,
+    Nested,
+    Object,
+    Text,
+    mapped_field,
+)
+from elasticsearch.dsl.exceptions import ValidationException
+
+
+class Author(InnerDoc):
+    name: str
+    email: str
+
+    def clean(self) -> None:
+        if not self.name:
+            raise ValidationException("name is missing")
+        if not self.email:
+            raise ValidationException("email is missing")
+        elif self.name.lower() not in self.email:
+            raise ValidationException("Invalid email!")
+
+
+class BlogPost(Document):
+    authors = Nested(Author, required=True)
+    created = Date()
+    inner = Object()
+
+
+class BlogPostWithStatus(Document):
+    published: bool = mapped_field(init=False)
+
+
+class AutoNowDate(Date):
+    def clean(self, data: Any) -> Any:
+        if data is None:
+            data = datetime.now()
+        return super().clean(data)
+
+
+class Log(Document):
+    timestamp = AutoNowDate(required=True)
+    data = Text()
+
+
+def test_required_int_can_be_0() -> None:
+    class DT(Document):
+        i = Integer(required=True)
+
+    dt = DT(i=0)
+    dt.full_clean()
+
+
+def test_required_field_cannot_be_empty_list() -> None:
+    class DT(Document):
+        i = Integer(required=True)
+
+    dt = DT(i=[])
+    with raises(ValidationException):
+        dt.full_clean()
+
+
+def test_validation_works_for_lists_of_values() -> None:
+    class DT(Document):
+        i = Date(required=True)
+
+    dt = DT(i=[datetime.now(), "not date"])
+    with raises(ValidationException):
+        dt.full_clean()
+
+    dt = DT(i=[datetime.now(), datetime.now()])
+    dt.full_clean()
+
+
+def test_field_with_custom_clean() -> None:
+    l = Log()
+    l.full_clean()
+
+    assert isinstance(l.timestamp, datetime)
+
+
+def test_empty_object() -> None:
+    d = BlogPost(authors=[{"name": "Honza", "email": "honza@elastic.co"}])
+    d.inner = {}  # type: ignore[assignment]
+
+    d.full_clean()
+
+
+def test_missing_required_field_raises_validation_exception() -> None:
+    d = BlogPost()
+    with raises(ValidationException):
+        d.full_clean()
+
+    d = BlogPost()
+    d.authors.append({"name": "Honza"})
+    with raises(ValidationException):
+        d.full_clean()
+
+    d = BlogPost()
+    d.authors.append({"name": "Honza", "email": "honza@elastic.co"})
+    d.full_clean()
+
+
+def test_boolean_doesnt_treat_false_as_empty() -> None:
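+    # An explicit False satisfies the required check; only the unset field raises.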
+    d = BlogPostWithStatus()
+    with raises(ValidationException):
+        d.full_clean()
+    d.published = False
+    d.full_clean()
+    d.published = True
+    d.full_clean()
+
+
+def test_custom_validation_on_nested_gets_run() -> None:
+    d = BlogPost(authors=[Author(name="Honza", email="king@example.com")], created=None)
+
+    assert isinstance(d.authors[0], Author)  # type: ignore[index]
+
+    with raises(ValidationException):
+        d.full_clean()
+
+
+def test_accessing_known_fields_returns_empty_value() -> None:
+    d = BlogPost()
+
+    assert [] == d.authors
+
+    d.authors.append({})
+    assert None is d.authors[0].name  # type: ignore[index]
+    assert None is d.authors[0].email
+
+
+def test_empty_values_are_not_serialized() -> None:
+    d = BlogPost(authors=[{"name": "Honza", "email": "honza@elastic.co"}], created=None)
+
+    d.full_clean()
+    assert d.to_dict() == {"authors": [{"name": "Honza", "email": "honza@elastic.co"}]}
diff -pruN 8.17.2-2/test_elasticsearch/test_dsl/test_wrappers.py 9.2.0-1/test_elasticsearch/test_dsl/test_wrappers.py
--- 8.17.2-2/test_elasticsearch/test_dsl/test_wrappers.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_dsl/test_wrappers.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,111 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from datetime import datetime, timedelta
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence
+
+if TYPE_CHECKING:
+    from _operator import _SupportsComparison
+
+import pytest
+
+from elasticsearch.dsl import Range
+
+
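+# A Range with no bounds contains every value; gte/lte bounds are inclusive, while
+# gt/lt (exercised in the not-contains cases below) are exclusive.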
+@pytest.mark.parametrize(
+    "kwargs, item",
+    [
+        ({}, 1),
+        ({}, -1),
+        ({"gte": -1}, -1),
+        ({"lte": 4}, 4),
+        ({"lte": 4, "gte": 2}, 4),
+        ({"lte": 4, "gte": 2}, 2),
+        ({"gt": datetime.now() - timedelta(seconds=10)}, datetime.now()),
+    ],
+)
+def test_range_contains(
+    kwargs: Mapping[str, "_SupportsComparison"], item: "_SupportsComparison"
+) -> None:
+    assert item in Range(**kwargs)
+
+
+@pytest.mark.parametrize(
+    "kwargs, item",
+    [
+        ({"gt": -1}, -1),
+        ({"lt": 4}, 4),
+        ({"lt": 4}, 42),
+        ({"lte": 4, "gte": 2}, 1),
+        ({"lte": datetime.now() - timedelta(seconds=10)}, datetime.now()),
+    ],
+)
+def test_range_not_contains(
+    kwargs: Mapping[str, "_SupportsComparison"], item: "_SupportsComparison"
+) -> None:
+    assert item not in Range(**kwargs)
+
+
+@pytest.mark.parametrize(
+    "args,kwargs",
+    [
+        (({},), {"lt": 42}),
+        ((), {"not_lt": 42}),
+        ((object(),), {}),
+        ((), {"lt": 1, "lte": 1}),
+        ((), {"gt": 1, "gte": 1}),
+    ],
+)
+def test_range_raises_value_error_on_wrong_params(
+    args: Sequence[Any], kwargs: Mapping[str, "_SupportsComparison"]
+) -> None:
+    with pytest.raises(ValueError):
+        Range(*args, **kwargs)
+
+
+@pytest.mark.parametrize(
+    "range,lower,inclusive",
+    [
+        (Range(gt=1), 1, False),
+        (Range(gte=1), 1, True),
+        (Range(), None, False),
+        (Range(lt=42), None, False),
+    ],
+)
+def test_range_lower(
+    range: Range["_SupportsComparison"],
+    lower: Optional["_SupportsComparison"],
+    inclusive: bool,
+) -> None:
+    assert (lower, inclusive) == range.lower
+
+
+@pytest.mark.parametrize(
+    "range,upper,inclusive",
+    [
+        (Range(lt=1), 1, False),
+        (Range(lte=1), 1, True),
+        (Range(), None, False),
+        (Range(gt=42), None, False),
+    ],
+)
+def test_range_upper(
+    range: Range["_SupportsComparison"],
+    upper: Optional["_SupportsComparison"],
+    inclusive: bool,
+) -> None:
+    assert (upper, inclusive) == range.upper
diff -pruN 8.17.2-2/test_elasticsearch/test_esql.py 9.2.0-1/test_elasticsearch/test_esql.py
--- 8.17.2-2/test_elasticsearch/test_esql.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_esql.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,964 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from elasticsearch.dsl import E
+from elasticsearch.esql import ESQL, and_, functions, not_, or_
+
+
+def test_from():
+    query = ESQL.from_("employees")
+    assert query.render() == "FROM employees"
+
+    query = ESQL.from_("<logs-{now/d}>")
+    assert query.render() == "FROM <logs-{now/d}>"
+
+    query = ESQL.from_("employees-00001", "other-employees-*")
+    assert query.render() == "FROM employees-00001, other-employees-*"
+
+    query = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*")
+    assert (
+        query.render()
+        == "FROM cluster_one:employees-00001, cluster_two:other-employees-*"
+    )
+
+    query = ESQL.from_("employees").metadata("_id")
+    assert query.render() == "FROM employees METADATA _id"
+
+
+def test_row():
+    query = ESQL.row(a=1, b="two", c=None)
+    assert query.render() == 'ROW a = 1, b = "two", c = null'
+
+    query = ESQL.row(a=[2, 1])
+    assert query.render() == "ROW a = [2, 1]"
+
+    query = ESQL.row(a=functions.round(1.23, 0))
+    assert query.render() == "ROW a = ROUND(1.23, 0)"
+
+
+def test_show():
+    query = ESQL.show("INFO")
+    assert query.render() == "SHOW INFO"
+
+
+def test_ts():
+    query = (
+        ESQL.ts("metrics")
+        .where("@timestamp >= now() - 1 day")
+        .stats("SUM(AVG_OVER_TIME(memory_usage))")
+        .by("host", "TBUCKET(1 hour)")
+    )
+    assert (
+        query.render()
+        == """TS metrics
+| WHERE @timestamp >= now() - 1 day
+| STATS SUM(AVG_OVER_TIME(memory_usage))
+        BY host, TBUCKET(1 hour)"""
+    )
+
+
+def test_change_point():
+    query = (
+        ESQL.row(key=list(range(1, 26)))
+        .mv_expand("key")
+        .eval(value=functions.case(E("key") < 13, 0, 42))
+        .change_point("value")
+        .on("key")
+        .where("type IS NOT NULL")
+    )
+    assert (
+        query.render()
+        == """ROW key = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
+| MV_EXPAND key
+| EVAL value = CASE(key < 13, 0, 42)
+| CHANGE_POINT value ON key
+| WHERE type IS NOT NULL"""
+    )
+
+
+def test_completion():
+    query = (
+        ESQL.row(question="What is Elasticsearch?")
+        .completion("question")
+        .with_("test_completion_model")
+        .keep("question", "completion")
+    )
+    assert (
+        query.render()
+        == """ROW question = "What is Elasticsearch?"
+| COMPLETION question WITH {"inference_id": "test_completion_model"}
+| KEEP question, completion"""
+    )
+
+    query = (
+        ESQL.row(question="What is Elasticsearch?")
+        .completion(answer=E("question"))
+        .with_("test_completion_model")
+        .keep("question", "answer")
+    )
+    assert (
+        query.render()
+        == """ROW question = "What is Elasticsearch?"
+| COMPLETION answer = question WITH {"inference_id": "test_completion_model"}
+| KEEP question, answer"""
+    )
+
+    query = (
+        ESQL.from_("movies")
+        .sort("rating DESC")
+        .limit(10)
+        .eval(
+            prompt="""CONCAT(
+      "Summarize this movie using the following information: \\n",
+      "Title: ", title, "\\n",
+      "Synopsis: ", synopsis, "\\n",
+      "Actors: ", MV_CONCAT(actors, ", "), "\\n",
+  )"""
+        )
+        .completion(summary="prompt")
+        .with_("test_completion_model")
+        .keep("title", "summary", "rating")
+    )
+    assert (
+        query.render()
+        == """FROM movies
+| SORT rating DESC
+| LIMIT 10
+| EVAL prompt = CONCAT(
+      "Summarize this movie using the following information: \\n",
+      "Title: ", title, "\\n",
+      "Synopsis: ", synopsis, "\\n",
+      "Actors: ", MV_CONCAT(actors, ", "), "\\n",
+  )
+| COMPLETION summary = prompt WITH {"inference_id": "test_completion_model"}
+| KEEP title, summary, rating"""
+    )
+
+    query = (
+        ESQL.from_("movies")
+        .sort("rating DESC")
+        .limit(10)
+        .eval(
+            prompt=functions.concat(
+                "Summarize this movie using the following information: \n",
+                "Title: ",
+                E("title"),
+                "\n",
+                "Synopsis: ",
+                E("synopsis"),
+                "\n",
+                "Actors: ",
+                functions.mv_concat(E("actors"), ", "),
+                "\n",
+            )
+        )
+        .completion(summary="prompt")
+        .with_("test_completion_model")
+        .keep("title", "summary", "rating")
+    )
+    assert (
+        query.render()
+        == """FROM movies
+| SORT rating DESC
+| LIMIT 10
+| EVAL prompt = CONCAT("Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n")
+| COMPLETION summary = prompt WITH {"inference_id": "test_completion_model"}
+| KEEP title, summary, rating"""
+    )
+
+
+def test_dissect():
+    query = (
+        ESQL.row(a="2023-01-23T12:15:00.000Z - some text - 127.0.0.1")
+        .dissect("a", "%{date} - %{msg} - %{ip}")
+        .keep("date", "msg", "ip")
+    )
+    assert (
+        query.render()
+        == """ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1"
+| DISSECT a "%{date} - %{msg} - %{ip}"
+| KEEP date, msg, ip"""
+    )
+
+
+def test_drop():
+    query = ESQL.from_("employees").drop("height")
+    assert query.render() == "FROM employees\n| DROP height"
+    query = ESQL.from_("employees").drop("height*")
+    assert query.render() == "FROM employees\n| DROP height*"
+
+
+def test_enrich():
+    query = ESQL.row(language_code="1").enrich("languages_policy")
+    assert (
+        query.render()
+        == """ROW language_code = "1"
+| ENRICH languages_policy"""
+    )
+
+    query = ESQL.row(language_code="1").enrich("languages_policy").on("a")
+    assert (
+        query.render()
+        == """ROW language_code = "1"
+| ENRICH languages_policy ON a"""
+    )
+
+    query = (
+        ESQL.row(language_code="1")
+        .enrich("languages_policy")
+        .on("a")
+        .with_(name="language_name")
+    )
+    assert (
+        query.render()
+        == """ROW language_code = "1"
+| ENRICH languages_policy ON a WITH name = language_name"""
+    )
+
+
+def test_eval():
+    query = (
+        ESQL.from_("employees")
+        .sort("emp_no")
+        .keep("first_name", "last_name", "height")
+        .eval(height_feet=E("height") * 3.281, height_cm=E("height") * 100)
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| SORT emp_no
+| KEEP first_name, last_name, height
+| EVAL height_feet = height * 3.281, height_cm = height * 100"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .sort("emp_no")
+        .keep("first_name", "last_name", "height")
+        .eval(E("height") * 3.281)
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| SORT emp_no
+| KEEP first_name, last_name, height
+| EVAL height * 3.281"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .eval("height * 3.281")
+        .stats(avg_height_feet=functions.avg(E("`height * 3.281`")))
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| EVAL height * 3.281
+| STATS avg_height_feet = AVG(`height * 3.281`)"""
+    )
+
+
+def test_fork():
+    query = (
+        ESQL.from_("employees")
+        .fork(
+            ESQL.branch().where(E("emp_no") == 10001),
+            ESQL.branch().where("emp_no == 10002"),
+        )
+        .keep("emp_no", "_fork")
+        .sort("emp_no")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| FORK ( WHERE emp_no == 10001 )
+       ( WHERE emp_no == 10002 )
+| KEEP emp_no, _fork
+| SORT emp_no"""
+    )
+
+
+def test_fuse():
+    query = (
+        ESQL.from_("books")
+        .metadata("_id", "_index", "_score")
+        .fork(
+            ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+            ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+        )
+        .fuse()
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _id, _index, _score
+| FORK ( WHERE title:"Shakespeare" | SORT _score DESC )
+       ( WHERE semantic_title:"Shakespeare" | SORT _score DESC )
+| FUSE"""
+    )
+
+    query = (
+        ESQL.from_("books")
+        .metadata("_id", "_index", "_score")
+        .fork(
+            ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+            ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+        )
+        .fuse("linear")
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _id, _index, _score
+| FORK ( WHERE title:"Shakespeare" | SORT _score DESC )
+       ( WHERE semantic_title:"Shakespeare" | SORT _score DESC )
+| FUSE LINEAR"""
+    )
+
+    query = (
+        ESQL.from_("books")
+        .metadata("_id", "_index", "_score")
+        .fork(
+            ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+            ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+        )
+        .fuse("linear")
+        .by("title", "description")
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _id, _index, _score
+| FORK ( WHERE title:"Shakespeare" | SORT _score DESC )
+       ( WHERE semantic_title:"Shakespeare" | SORT _score DESC )
+| FUSE LINEAR BY title BY description"""
+    )
+
+    query = (
+        ESQL.from_("books")
+        .metadata("_id", "_index", "_score")
+        .fork(
+            ESQL.branch().where('title:"Shakespeare"').sort("_score DESC"),
+            ESQL.branch().where('semantic_title:"Shakespeare"').sort("_score DESC"),
+        )
+        .fuse("linear")
+        .with_(normalizer="minmax")
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _id, _index, _score
+| FORK ( WHERE title:"Shakespeare" | SORT _score DESC )
+       ( WHERE semantic_title:"Shakespeare" | SORT _score DESC )
+| FUSE LINEAR WITH {"normalizer": "minmax"}"""
+    )
+
+
+def test_grok():
+    query = (
+        ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42")
+        .grok(
+            "a",
+            "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}",
+        )
+        .keep("date", "ip", "email", "num")
+    )
+    assert (
+        query.render()
+        == """ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42"
+| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}"
+| KEEP date, ip, email, num"""
+    )
+
+    query = (
+        ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42")
+        .grok(
+            "a",
+            "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}",
+        )
+        .keep("date", "ip", "email", "num")
+        .eval(date=functions.to_datetime(E("date")))
+    )
+    assert (
+        query.render()
+        == """ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42"
+| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}"
+| KEEP date, ip, email, num
+| EVAL date = TO_DATETIME(date)"""
+    )
+
+    query = (
+        ESQL.from_("addresses")
+        .keep("city.name", "zip_code")
+        .grok("zip_code", "%{WORD:zip_parts} %{WORD:zip_parts}")
+    )
+    assert (
+        query.render()
+        == """FROM addresses
+| KEEP city.name, zip_code
+| GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}\""""
+    )
+
+
+def test_inline_stats():
+    query = (
+        ESQL.from_("employees")
+        .keep("emp_no", "languages", "salary")
+        .inline_stats(max_salary=functions.max(E("salary")))
+        .by("languages")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP emp_no, languages, salary
+| INLINE STATS max_salary = MAX(salary)
+               BY languages"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .keep("emp_no", "languages", "salary")
+        .inline_stats(max_salary=functions.max(E("salary")))
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP emp_no, languages, salary
+| INLINE STATS max_salary = MAX(salary)"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .where("still_hired")
+        .keep("emp_no", "languages", "salary", "hire_date")
+        .eval(tenure=functions.date_diff("year", E("hire_date"), "2025-09-18T00:00:00"))
+        .drop("hire_date")
+        .inline_stats(
+            avg_salary=functions.avg(E("salary")),
+            count=functions.count(E("*")),
+        )
+        .by("languages", "tenure")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| WHERE still_hired
+| KEEP emp_no, languages, salary, hire_date
+| EVAL tenure = DATE_DIFF("year", hire_date, "2025-09-18T00:00:00")
+| DROP hire_date
+| INLINE STATS avg_salary = AVG(salary),
+               count = COUNT(*)
+               BY languages, tenure"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .keep("emp_no", "salary")
+        .inline_stats(
+            avg_lt_50=functions.round(functions.avg(E("salary"))).where(
+                E("salary") < 50000
+            ),
+            avg_lt_60=functions.round(functions.avg(E("salary"))).where(
+                E("salary") >= 50000, E("salary") < 60000
+            ),
+            avg_gt_60=functions.round(functions.avg(E("salary"))).where(
+                E("salary") >= 60000
+            ),
+        )
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP emp_no, salary
+| INLINE STATS avg_lt_50 = ROUND(AVG(salary)) WHERE salary < 50000,
+               avg_lt_60 = ROUND(AVG(salary)) WHERE (salary >= 50000) AND (salary < 60000),
+               avg_gt_60 = ROUND(AVG(salary)) WHERE salary >= 60000"""
+    )
+
+
+def test_keep():
+    query = ESQL.from_("employees").keep("emp_no", "first_name", "last_name", "height")
+    assert (
+        query.render() == "FROM employees\n| KEEP emp_no, first_name, last_name, height"
+    )
+
+    query = ESQL.from_("employees").keep("h*")
+    assert query.render() == "FROM employees\n| KEEP h*"
+
+    query = ESQL.from_("employees").keep("*", "first_name")
+    assert query.render() == "FROM employees\n| KEEP *, first_name"
+
+
+def test_limit():
+    query = ESQL.from_("index").where(E("field") == "value").limit(1000)
+    assert query.render() == 'FROM index\n| WHERE field == "value"\n| LIMIT 1000'
+
+    query = (
+        ESQL.from_("index").stats(functions.avg(E("field1"))).by("field2").limit(20000)
+    )
+    assert (
+        query.render()
+        == "FROM index\n| STATS AVG(field1)\n        BY field2\n| LIMIT 20000"
+    )
+
+
+def test_lookup_join():
+    query = (
+        ESQL.from_("firewall_logs")
+        .lookup_join("threat_list")
+        .on("source.IP")
+        .where("threat_level IS NOT NULL")
+    )
+    assert (
+        query.render()
+        == """FROM firewall_logs
+| LOOKUP JOIN threat_list ON source.IP
+| WHERE threat_level IS NOT NULL"""
+    )
+
+    query = (
+        ESQL.from_("system_metrics")
+        .lookup_join("host_inventory")
+        .on("host.name")
+        .lookup_join("ownerships")
+        .on("host.name")
+    )
+    assert (
+        query.render()
+        == """FROM system_metrics
+| LOOKUP JOIN host_inventory ON host.name
+| LOOKUP JOIN ownerships ON host.name"""
+    )
+
+    query = ESQL.from_("app_logs").lookup_join("service_owners").on("service_id")
+    assert (
+        query.render()
+        == """FROM app_logs
+| LOOKUP JOIN service_owners ON service_id"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .eval(language_code="languages")
+        .where(E("emp_no") >= 10091, E("emp_no") < 10094)
+        .lookup_join("languages_lookup")
+        .on("language_code")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| EVAL language_code = languages
+| WHERE emp_no >= 10091 AND emp_no < 10094
+| LOOKUP JOIN languages_lookup ON language_code"""
+    )
+
+
+def test_mv_expand():
+    query = ESQL.row(a=[1, 2, 3], b="b", j=["a", "b"]).mv_expand("a")
+    assert (
+        query.render()
+        == """ROW a = [1, 2, 3], b = "b", j = ["a", "b"]
+| MV_EXPAND a"""
+    )
+
+
+def test_rename():
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "still_hired")
+        .rename(still_hired="employed")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, still_hired
+| RENAME still_hired AS employed"""
+    )
+
+
+def test_rerank():
+    query = (
+        ESQL.from_("books")
+        .metadata("_score")
+        .where('MATCH(description, "hobbit")')
+        .sort("_score DESC")
+        .limit(100)
+        .rerank("hobbit")
+        .on("description")
+        .with_(inference_id="test_reranker")
+        .limit(3)
+        .keep("title", "_score")
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _score
+| WHERE MATCH(description, "hobbit")
+| SORT _score DESC
+| LIMIT 100
+| RERANK "hobbit" ON description WITH {"inference_id": "test_reranker"}
+| LIMIT 3
+| KEEP title, _score"""
+    )
+
+    query = (
+        ESQL.from_("books")
+        .metadata("_score")
+        .where('MATCH(description, "hobbit") OR MATCH(author, "Tolkien")')
+        .sort("_score DESC")
+        .limit(100)
+        .rerank(rerank_score="hobbit")
+        .on("description", "author")
+        .with_(inference_id="test_reranker")
+        .sort("rerank_score")
+        .limit(3)
+        .keep("title", "_score", "rerank_score")
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _score
+| WHERE MATCH(description, "hobbit") OR MATCH(author, "Tolkien")
+| SORT _score DESC
+| LIMIT 100
+| RERANK rerank_score = "hobbit" ON description, author WITH {"inference_id": "test_reranker"}
+| SORT rerank_score
+| LIMIT 3
+| KEEP title, _score, rerank_score"""
+    )
+
+    query = (
+        ESQL.from_("books")
+        .metadata("_score")
+        .where('MATCH(description, "hobbit") OR MATCH(author, "Tolkien")')
+        .sort("_score DESC")
+        .limit(100)
+        .rerank(rerank_score="hobbit")
+        .on("description", "author")
+        .with_(inference_id="test_reranker")
+        .eval(original_score="_score", _score="rerank_score + original_score")
+        .sort("_score")
+        .limit(3)
+        .keep("title", "original_score", "rerank_score", "_score")
+    )
+    assert (
+        query.render()
+        == """FROM books METADATA _score
+| WHERE MATCH(description, "hobbit") OR MATCH(author, "Tolkien")
+| SORT _score DESC
+| LIMIT 100
+| RERANK rerank_score = "hobbit" ON description, author WITH {"inference_id": "test_reranker"}
+| EVAL original_score = _score, _score = rerank_score + original_score
+| SORT _score
+| LIMIT 3
+| KEEP title, original_score, rerank_score, _score"""
+    )
+
+
+def test_sample():
+    query = ESQL.from_("employees").keep("emp_no").sample(0.05)
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP emp_no
+| SAMPLE 0.05"""
+    )
+
+
+def test_sort():
+    query = (
+        ESQL.from_("employees").keep("first_name", "last_name", "height").sort("height")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, height
+| SORT height"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "height")
+        .sort("height DESC")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, height
+| SORT height DESC"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "height")
+        .sort("height DESC", "first_name ASC")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, height
+| SORT height DESC, first_name ASC"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "height")
+        .sort("first_name ASC NULLS FIRST")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, height
+| SORT first_name ASC NULLS FIRST"""
+    )
+
+
+def test_stats():
+    query = (
+        ESQL.from_("employees")
+        .stats(count=functions.count(E("emp_no")))
+        .by("languages")
+        .sort("languages")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| STATS count = COUNT(emp_no)
+        BY languages
+| SORT languages"""
+    )
+
+    query = ESQL.from_("employees").stats(avg_lang=functions.avg(E("languages")))
+    assert (
+        query.render()
+        == """FROM employees
+| STATS avg_lang = AVG(languages)"""
+    )
+
+    query = ESQL.from_("employees").stats(
+        avg_lang=functions.avg(E("languages")), max_lang=functions.max(E("languages"))
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| STATS avg_lang = AVG(languages),
+        max_lang = MAX(languages)"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .stats(
+            avg50s=functions.avg(E("salary")).where('birth_date < "1960-01-01"'),
+            avg60s=functions.avg(E("salary")).where('birth_date >= "1960-01-01"'),
+        )
+        .by("gender")
+        .sort("gender")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| STATS avg50s = AVG(salary) WHERE birth_date < "1960-01-01",
+        avg60s = AVG(salary) WHERE birth_date >= "1960-01-01"
+        BY gender
+| SORT gender"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .eval(Ks="salary / 1000")
+        .stats(
+            under_40K=functions.count(E("*")).where("Ks < 40"),
+            inbetween=functions.count(E("*")).where("40 <= Ks", "Ks < 60"),
+            over_60K=functions.count(E("*")).where("60 <= Ks"),
+            total=functions.count(E("*")),
+        )
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| EVAL Ks = salary / 1000
+| STATS under_40K = COUNT(*) WHERE Ks < 40,
+        inbetween = COUNT(*) WHERE (40 <= Ks) AND (Ks < 60),
+        over_60K = COUNT(*) WHERE 60 <= Ks,
+        total = COUNT(*)"""
+    )
+
+    query = (
+        ESQL.row(i=1, a=["a", "b"]).stats(functions.min(E("i"))).by("a").sort("a ASC")
+    )
+    assert (
+        query.render()
+        == 'ROW i = 1, a = ["a", "b"]\n| STATS MIN(i)\n        BY a\n| SORT a ASC'
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .eval(hired=functions.date_format(E("hire_date"), "yyyy"))
+        .stats(avg_salary=functions.avg(E("salary")))
+        .by("hired", "languages.long")
+        .eval(avg_salary=functions.round(E("avg_salary")))
+        .sort("hired", "languages.long")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| EVAL hired = DATE_FORMAT("yyyy", hire_date)
+| STATS avg_salary = AVG(salary)
+        BY hired, languages.long
+| EVAL avg_salary = ROUND(avg_salary)
+| SORT hired, languages.long"""
+    )
+
+
+def test_where():
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "still_hired")
+        .where("still_hired == true")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, still_hired
+| WHERE still_hired == true"""
+    )
+
+    query = ESQL.from_("sample_data").where("@timestamp > NOW() - 1 hour")
+    assert (
+        query.render()
+        == """FROM sample_data
+| WHERE @timestamp > NOW() - 1 hour"""
+    )
+
+    query = (
+        ESQL.from_("employees")
+        .keep("first_name", "last_name", "height")
+        .where("LENGTH(first_name) < 4")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| KEEP first_name, last_name, height
+| WHERE LENGTH(first_name) < 4"""
+    )
+
+
+def test_and_operator():
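+    # and_() joins its arguments with AND and parenthesizes each operand.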
+    query = ESQL.from_("index").where(
+        and_(E("age") > 30, E("age") < 40, E("name").is_not_null())
+    )
+    assert (
+        query.render()
+        == """FROM index
+| WHERE (age > 30) AND (age < 40) AND (name IS NOT NULL)"""
+    )
+
+
+def test_or_operator():
+    query = ESQL.from_("index").where(
+        or_(E("age") < 30, E("age") > 40, E("name").is_null())
+    )
+    assert (
+        query.render()
+        == """FROM index
+| WHERE (age < 30) OR (age > 40) OR (name IS NULL)"""
+    )
+
+
+def test_not_operator():
+    query = ESQL.from_("index").where(not_(E("age") > 40))
+    assert (
+        query.render()
+        == """FROM index
+| WHERE NOT (age > 40)"""
+    )
+
+
+def test_in_operator():
+    query = ESQL.row(a=1, b=4, c=3).where((E("c") - E("a")).in_(3, E("b") / 2, "a"))
+    assert (
+        query.render()
+        == """ROW a = 1, b = 4, c = 3
+| WHERE c - a IN (3, b / 2, a)"""
+    )
+
+
+def test_like_operator():
+    query = (
+        ESQL.from_("employees")
+        .where(E("first_name").like("?b*"))
+        .keep("first_name", "last_name")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| WHERE first_name LIKE "?b*"
+| KEEP first_name, last_name"""
+    )
+
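+    # The single escaped "*" in the Python string is rendered with a doubled
+    # backslash in the generated ES|QL string literal.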
+    query = ESQL.row(message="foo * bar").where(E("message").like("foo \\* bar"))
+    assert (
+        query.render()
+        == """ROW message = "foo * bar"
+| WHERE message LIKE "foo \\\\* bar\""""
+    )
+
+    query = ESQL.row(message="foobar").where(E("message").like("foo*", "bar?"))
+    assert (
+        query.render()
+        == """ROW message = "foobar"
+| WHERE message LIKE ("foo*", "bar?")"""
+    )
+
+
+def test_rlike_operator():
+    query = (
+        ESQL.from_("employees")
+        .where(E("first_name").rlike(".leja*"))
+        .keep("first_name", "last_name")
+    )
+    assert (
+        query.render()
+        == """FROM employees
+| WHERE first_name RLIKE ".leja*"
+| KEEP first_name, last_name"""
+    )
+
+    query = ESQL.row(message="foo ( bar").where(E("message").rlike("foo \\( bar"))
+    assert (
+        query.render()
+        == """ROW message = "foo ( bar"
+| WHERE message RLIKE "foo \\\\( bar\""""
+    )
+
+    query = ESQL.row(message="foobar").where(E("message").rlike("foo.*", "bar."))
+    assert (
+        query.render()
+        == """ROW message = "foobar"
+| WHERE message RLIKE ("foo.*", "bar.")"""
+    )
+
+
+def test_match_operator():
+    query = ESQL.from_("books").where(E("author").match("Faulkner"))
+    assert (
+        query.render()
+        == """FROM books
+| WHERE author:"Faulkner\""""
+    )
+
+
+def test_parameters():
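+    # A bare "?" passes through as a positional query parameter, whether written
+    # inline or wrapped in E().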
+    query = ESQL.from_("employees").where("name == ?")
+    assert query.render() == "FROM employees\n| WHERE name == ?"
+
+    query = ESQL.from_("employees").where(E("name") == E("?"))
+    assert query.render() == "FROM employees\n| WHERE name == ?"
diff -pruN 8.17.2-2/test_elasticsearch/test_exceptions.py 9.2.0-1/test_elasticsearch/test_exceptions.py
--- 8.17.2-2/test_elasticsearch/test_exceptions.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_exceptions.py	2025-10-28 16:50:53.000000000 +0000
@@ -46,3 +46,33 @@ class TestTransformError:
         assert (
             str(e) == "ApiError(500, 'InternalServerError', 'something error message')"
         )
+
+    def test_transform_invalid_media_type_error(self):
+        e = ApiError(
+            message="InvalidMediaType",
+            meta=error_meta,
+            body={
+                "error": {
+                    "root_cause": [
+                        {
+                            "type": "media_type_header_exception",
+                            "reason": "Invalid media-type value on headers [Accept, Content-Type]",
+                        }
+                    ],
+                    "type": "media_type_header_exception",
+                    "reason": "Invalid media-type value on headers [Accept, Content-Type]",
+                    "caused_by": {
+                        "type": "status_exception",
+                        "reason": "Accept version must be either version 8 or 7, but found 9. Accept=application/vnd.elasticsearch+json; compatible-with=9",
+                    },
+                },
+                "status": 400,
+            },
+        )
+
+        assert str(e) == (
+            "ApiError(500, 'InvalidMediaType', "
+            "'Invalid media-type value on headers [Accept, Content-Type]', "
+            "Accept version must be either version 8 or 7, but found 9. "
+            "Accept=application/vnd.elasticsearch+json; compatible-with=9)"
+        )
diff -pruN 8.17.2-2/test_elasticsearch/test_helpers.py 9.2.0-1/test_elasticsearch/test_helpers.py
--- 8.17.2-2/test_elasticsearch/test_helpers.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_helpers.py	2025-10-28 16:50:53.000000000 +0000
@@ -18,6 +18,7 @@
 import pickle
 import threading
 import time
+from typing import Optional
 from unittest import mock
 
 import pytest
@@ -156,21 +157,34 @@ class TestChunkActions:
             {"_source": {"key2": "val2"}, "key": "val", "_op_type": "update"}
         ) == ({"update": {}}, {"key2": "val2"})
 
-    def test_chunks_are_chopped_by_byte_size(self):
+    @pytest.mark.parametrize("flush_seconds", [None, 10])
+    def test_chunks_are_chopped_by_byte_size(self, flush_seconds: Optional[float]):
         assert 100 == len(
-            list(helpers._chunk_actions(self.actions, 100000, 1, JSONSerializer()))
+            list(
+                helpers._chunk_actions(
+                    self.actions, 100000, 1, flush_seconds, JSONSerializer()
+                )
+            )
         )
 
-    def test_chunks_are_chopped_by_chunk_size(self):
+    @pytest.mark.parametrize("flush_seconds", [None, 10])
+    def test_chunks_are_chopped_by_chunk_size(self, flush_seconds: Optional[float]):
         assert 10 == len(
-            list(helpers._chunk_actions(self.actions, 10, 99999999, JSONSerializer()))
+            list(
+                helpers._chunk_actions(
+                    self.actions, 10, 99999999, flush_seconds, JSONSerializer()
+                )
+            )
         )
 
-    def test_chunks_are_chopped_by_byte_size_properly(self):
+    @pytest.mark.parametrize("flush_seconds", [None, 10])
+    def test_chunks_are_chopped_by_byte_size_properly(
+        self, flush_seconds: Optional[float]
+    ):
         max_byte_size = 170
         chunks = list(
             helpers._chunk_actions(
-                self.actions, 100000, max_byte_size, JSONSerializer()
+                self.actions, 100000, max_byte_size, flush_seconds, JSONSerializer()
             )
         )
         assert 25 == len(chunks)
@@ -178,6 +192,30 @@ class TestChunkActions:
             chunk = b"".join(chunk_actions)
             assert len(chunk) <= max_byte_size
 
+    @pytest.mark.parametrize("flush_seconds", [None, 10])
+    def test_chunks_are_chopped_by_flush(self, flush_seconds: Optional[float]):
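+        # BULK_FLUSH acts as a sentinel action: it forces the buffered actions to be
+        # emitted as a chunk before either size limit is reached, and consecutive
+        # flushes do not produce empty chunks.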
+        flush = (helpers.BULK_FLUSH, None)
+        actions = (
+            self.actions[:3]
+            + [flush] * 2  # two consecutive flushes after 3 items
+            + self.actions[3:4]
+            + [flush]  # flush after one more item
+            + self.actions[4:]
+            + [flush]  # flush at the end
+        )
+        chunks = list(
+            helpers._chunk_actions(
+                actions, 100, 99999999, flush_seconds, JSONSerializer()
+            )
+        )
+        assert 3 == len(chunks)
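+        # chunks[i][0] holds one entry per action and chunks[i][1] the serialized
+        # lines; each action here serializes to two lines, hence 3/6, 1/2 and 96/192.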
+        assert len(chunks[0][0]) == 3
+        assert len(chunks[0][1]) == 6
+        assert len(chunks[1][0]) == 1
+        assert len(chunks[1][1]) == 2
+        assert len(chunks[2][0]) == 96
+        assert len(chunks[2][1]) == 192
+
 
 class TestExpandActions:
     @pytest.mark.parametrize("action", ["whatever", b"whatever"])
diff -pruN 8.17.2-2/test_elasticsearch/test_otel.py 9.2.0-1/test_elasticsearch/test_otel.py
--- 8.17.2-2/test_elasticsearch/test_otel.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_otel.py	2025-10-28 16:50:53.000000000 +0000
@@ -68,7 +68,7 @@ def test_minimal_span():
     assert spans[0].name == "GET"
     assert spans[0].attributes == {
         "http.request.method": "GET",
-        "db.system": "elasticsearch",
+        "db.system.name": "elasticsearch",
     }
 
 
@@ -92,11 +92,11 @@ def test_detailed_span():
     assert spans[0].name == "ml.open_job"
     assert spans[0].attributes == {
         "http.request.method": "GET",
-        "db.system": "elasticsearch",
-        "db.operation": "ml.open_job",
-        "db.elasticsearch.path_parts.job_id": "my-job",
-        "db.elasticsearch.cluster.name": "e9106fc68e3044f0b1475b04bf4ffd5f",
-        "db.elasticsearch.node.name": "instance-0000000001",
+        "db.system.name": "elasticsearch",
+        "db.operation.name": "ml.open_job",
+        "db.operation.parameter.job_id": "my-job",
+        "db.namespace": "e9106fc68e3044f0b1475b04bf4ffd5f",
+        "elasticsearch.node.name": "instance-0000000001",
     }
 
 
diff -pruN 8.17.2-2/test_elasticsearch/test_serializer.py 9.2.0-1/test_elasticsearch/test_serializer.py
--- 8.17.2-2/test_elasticsearch/test_serializer.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_serializer.py	2025-10-28 16:50:53.000000000 +0000
@@ -223,10 +223,10 @@ class TestDeserializer:
 
     def test_deserialize_compatibility_header(self):
         for content_type in (
-            "application/vnd.elasticsearch+json;compatible-with=7",
-            "application/vnd.elasticsearch+json; compatible-with=7",
             "application/vnd.elasticsearch+json;compatible-with=8",
             "application/vnd.elasticsearch+json; compatible-with=8",
+            "application/vnd.elasticsearch+json;compatible-with=9",
+            "application/vnd.elasticsearch+json; compatible-with=9",
         ):
             assert {"some": "data"} == self.serializers.loads(
                 '{"some":"data"}', content_type
@@ -236,10 +236,10 @@ class TestDeserializer:
             )
 
         for content_type in (
-            "application/vnd.elasticsearch+x-ndjson;compatible-with=7",
-            "application/vnd.elasticsearch+x-ndjson; compatible-with=7",
             "application/vnd.elasticsearch+x-ndjson;compatible-with=8",
             "application/vnd.elasticsearch+x-ndjson; compatible-with=8",
+            "application/vnd.elasticsearch+x-ndjson;compatible-with=9",
+            "application/vnd.elasticsearch+x-ndjson; compatible-with=9",
         ):
             assert b'{"some":"data"}\n{"some":"data"}\n' == self.serializers.dumps(
                 ['{"some":"data"}', {"some": "data"}], content_type
diff -pruN 8.17.2-2/test_elasticsearch/test_server/test_helpers.py 9.2.0-1/test_elasticsearch/test_server/test_helpers.py
--- 8.17.2-2/test_elasticsearch/test_server/test_helpers.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_server/test_helpers.py	2025-10-28 16:50:53.000000000 +0000
@@ -16,6 +16,7 @@
 #  under the License.
 
 import json
+import time
 from datetime import datetime, timedelta
 from unittest.mock import call, patch
 
@@ -75,6 +76,47 @@ def test_bulk_all_documents_get_inserted
     assert {"answer": 42} == sync_client.get(index="test-index", id=42)["_source"]
 
 
+def test_explicit_flushes(sync_client):
+    def sync_gen():
+        yield {"answer": 0, "_id": 0}
+        yield {"answer": 1, "_id": 1}
+        yield helpers.BULK_FLUSH
+        time.sleep(0.5)
+        yield {"answer": 2, "_id": 2}
+
+    timestamps = []
+    for ok, item in helpers.streaming_bulk(
+        sync_client, sync_gen(), index="test-index", refresh=True
+    ):
+        assert ok
+        timestamps.append(time.time())
+
+    # make sure there is a pause between the writing of the 2nd and 3rd items
+    assert timestamps[2] - timestamps[1] > (timestamps[1] - timestamps[0]) * 2
+
+
+def test_timeout_flushes(sync_client):
+    def sync_gen():
+        yield {"answer": 0, "_id": 0}
+        yield {"answer": 1, "_id": 1}
+        time.sleep(0.5)
+        yield {"answer": 2, "_id": 2}
+
+    timestamps = []
+    for ok, item in helpers.streaming_bulk(
+        sync_client,
+        sync_gen(),
+        index="test-index",
+        refresh=True,
+        flush_after_seconds=0.05,
+    ):
+        assert ok
+        timestamps.append(time.time())
+
+    # make sure there is a pause between the writing of the 2nd and 3rd items
+    assert timestamps[2] - timestamps[1] > (timestamps[1] - timestamps[0]) * 2
+
+
 def test_bulk_all_errors_from_chunk_are_raised_on_failure(sync_client):
     sync_client.indices.create(
         index="i",
@@ -515,9 +557,10 @@ def test_scroll_error(sync_client):
         bulk.append({"value": x})
     sync_client.bulk(operations=bulk, refresh=True)
 
-    with patch.object(sync_client, "options", return_value=sync_client), patch.object(
-        sync_client, "scroll"
-    ) as scroll_mock:
+    with (
+        patch.object(sync_client, "options", return_value=sync_client),
+        patch.object(sync_client, "scroll") as scroll_mock,
+    ):
         scroll_mock.side_effect = mock_scroll_responses
         data = list(
             helpers.scan(
@@ -547,21 +590,25 @@ def test_scroll_error(sync_client):
 
 
 def test_initial_search_error(sync_client):
-    with patch.object(
-        sync_client,
-        "search",
-        return_value=ObjectApiResponse(
-            meta=None,
-            raw={
-                "_scroll_id": "dummy_id",
-                "_shards": {"successful": 4, "total": 5, "skipped": 0},
-                "hits": {"hits": [{"search_data": 1}]},
-            },
-        ),
-    ), patch.object(sync_client, "options", return_value=sync_client):
-        with patch.object(sync_client, "scroll") as scroll_mock, patch.object(
-            sync_client, "clear_scroll"
-        ) as clear_scroll_mock:
+    with (
+        patch.object(
+            sync_client,
+            "search",
+            return_value=ObjectApiResponse(
+                meta=None,
+                raw={
+                    "_scroll_id": "dummy_id",
+                    "_shards": {"successful": 4, "total": 5, "skipped": 0},
+                    "hits": {"hits": [{"search_data": 1}]},
+                },
+            ),
+        ),
+        patch.object(sync_client, "options", return_value=sync_client),
+    ):
+        with (
+            patch.object(sync_client, "scroll") as scroll_mock,
+            patch.object(sync_client, "clear_scroll") as clear_scroll_mock,
+        ):
             scroll_mock.side_effect = mock_scroll_responses
             data = list(
                 helpers.scan(
@@ -579,9 +626,10 @@ def test_initial_search_error(sync_clien
                 scroll_id="dummy_id",
             )
 
-        with patch.object(sync_client, "scroll") as scroll_mock, patch.object(
-            sync_client, "clear_scroll"
-        ) as clear_scroll_mock:
+        with (
+            patch.object(sync_client, "scroll") as scroll_mock,
+            patch.object(sync_client, "clear_scroll") as clear_scroll_mock,
+        ):
             scroll_mock.side_effect = mock_scroll_responses
             with pytest.raises(ScanError):
                 data = list(
@@ -599,15 +647,16 @@ def test_initial_search_error(sync_clien
 
 
 def test_no_scroll_id_fast_route(sync_client):
-    with patch.object(
-        sync_client,
-        "search",
-        return_value=ObjectApiResponse(meta=None, raw={"no": "_scroll_id"}),
-    ) as search_mock, patch.object(sync_client, "scroll") as scroll_mock, patch.object(
-        sync_client, "clear_scroll"
-    ) as clear_scroll_mock, patch.object(
-        sync_client, "options", return_value=sync_client
-    ) as options:
+    with (
+        patch.object(
+            sync_client,
+            "search",
+            return_value=ObjectApiResponse(meta=None, raw={"no": "_scroll_id"}),
+        ) as search_mock,
+        patch.object(sync_client, "scroll") as scroll_mock,
+        patch.object(sync_client, "clear_scroll") as clear_scroll_mock,
+        patch.object(sync_client, "options", return_value=sync_client) as options,
+    ):
         data = list(helpers.scan(sync_client, index="test_index"))
 
         assert data == []
@@ -636,32 +685,37 @@ def test_no_scroll_id_fast_route(sync_cl
 def test_scan_auth_kwargs_forwarded(sync_client, kwargs):
     ((key, val),) = kwargs.items()
 
-    with patch.object(
-        sync_client, "options", return_value=sync_client
-    ) as options, patch.object(
-        sync_client,
-        "search",
-        return_value=ObjectApiResponse(
-            meta=None,
-            raw={
-                "_scroll_id": "scroll_id",
-                "_shards": {"successful": 5, "total": 5, "skipped": 0},
-                "hits": {"hits": [{"search_data": 1}]},
-            },
+    with (
+        patch.object(sync_client, "options", return_value=sync_client) as options,
+        patch.object(
+            sync_client,
+            "search",
+            return_value=ObjectApiResponse(
+                meta=None,
+                raw={
+                    "_scroll_id": "scroll_id",
+                    "_shards": {"successful": 5, "total": 5, "skipped": 0},
+                    "hits": {"hits": [{"search_data": 1}]},
+                },
+            ),
         ),
-    ), patch.object(
-        sync_client,
-        "scroll",
-        return_value=ObjectApiResponse(
-            meta=None,
-            raw={
-                "_scroll_id": "scroll_id",
-                "_shards": {"successful": 5, "total": 5, "skipped": 0},
-                "hits": {"hits": []},
-            },
+        patch.object(
+            sync_client,
+            "scroll",
+            return_value=ObjectApiResponse(
+                meta=None,
+                raw={
+                    "_scroll_id": "scroll_id",
+                    "_shards": {"successful": 5, "total": 5, "skipped": 0},
+                    "hits": {"hits": []},
+                },
+            ),
+        ),
+        patch.object(
+            sync_client,
+            "clear_scroll",
+            return_value=ObjectApiResponse(meta=None, raw={}),
         ),
-    ), patch.object(
-        sync_client, "clear_scroll", return_value=ObjectApiResponse(meta=None, raw={})
     ):
         data = list(helpers.scan(sync_client, index="test_index", **kwargs))
 
@@ -676,32 +730,37 @@ def test_scan_auth_kwargs_forwarded(sync
 
 
 def test_scan_auth_kwargs_favor_scroll_kwargs_option(sync_client):
-    with patch.object(
-        sync_client, "options", return_value=sync_client
-    ) as options_mock, patch.object(
-        sync_client,
-        "search",
-        return_value=ObjectApiResponse(
-            raw={
-                "_scroll_id": "scroll_id",
-                "_shards": {"successful": 5, "total": 5, "skipped": 0},
-                "hits": {"hits": [{"search_data": 1}]},
-            },
-            meta=None,
-        ),
-    ) as search_mock, patch.object(
-        sync_client,
-        "scroll",
-        return_value=ObjectApiResponse(
-            raw={
-                "_scroll_id": "scroll_id",
-                "_shards": {"successful": 5, "total": 5, "skipped": 0},
-                "hits": {"hits": []},
-            },
-            meta=None,
+    with (
+        patch.object(sync_client, "options", return_value=sync_client) as options_mock,
+        patch.object(
+            sync_client,
+            "search",
+            return_value=ObjectApiResponse(
+                raw={
+                    "_scroll_id": "scroll_id",
+                    "_shards": {"successful": 5, "total": 5, "skipped": 0},
+                    "hits": {"hits": [{"search_data": 1}]},
+                },
+                meta=None,
+            ),
+        ) as search_mock,
+        patch.object(
+            sync_client,
+            "scroll",
+            return_value=ObjectApiResponse(
+                raw={
+                    "_scroll_id": "scroll_id",
+                    "_shards": {"successful": 5, "total": 5, "skipped": 0},
+                    "hits": {"hits": []},
+                },
+                meta=None,
+            ),
+        ) as scroll_mock,
+        patch.object(
+            sync_client,
+            "clear_scroll",
+            return_value=ObjectApiResponse(raw={}, meta=None),
         ),
-    ) as scroll_mock, patch.object(
-        sync_client, "clear_scroll", return_value=ObjectApiResponse(raw={}, meta=None)
     ):
         data = list(
             helpers.scan(
@@ -735,9 +794,11 @@ def test_log_warning_on_shard_failures(s
         bulk.append({"value": x})
     sync_client.bulk(operations=bulk, refresh=True)
 
-    with patch("elasticsearch.helpers.actions.logger") as logger_mock, patch.object(
-        sync_client, "options", return_value=sync_client
-    ), patch.object(sync_client, "scroll") as scroll_mock:
+    with (
+        patch("elasticsearch.helpers.actions.logger") as logger_mock,
+        patch.object(sync_client, "options", return_value=sync_client),
+        patch.object(sync_client, "scroll") as scroll_mock,
+    ):
         scroll_mock.side_effect = mock_scroll_responses
         list(
             helpers.scan(
@@ -773,9 +834,12 @@ def test_clear_scroll(sync_client):
         bulk.append({"value": x})
     sync_client.bulk(operations=bulk, refresh=True)
 
-    with patch.object(sync_client, "options", return_value=sync_client), patch.object(
-        sync_client, "clear_scroll", wraps=sync_client.clear_scroll
-    ) as clear_scroll_mock:
+    with (
+        patch.object(sync_client, "options", return_value=sync_client),
+        patch.object(
+            sync_client, "clear_scroll", wraps=sync_client.clear_scroll
+        ) as clear_scroll_mock,
+    ):
         list(helpers.scan(sync_client, index="test_index", size=2))
         clear_scroll_mock.assert_called_once()
 
@@ -790,19 +854,22 @@ def test_clear_scroll(sync_client):
 
 def test_shards_no_skipped_field(sync_client):
     # Test that scan doesn't fail if 'hits.skipped' isn't available.
-    with patch.object(sync_client, "options", return_value=sync_client), patch.object(
-        sync_client,
-        "search",
-        return_value=ObjectApiResponse(
-            raw={
-                "_scroll_id": "dummy_id",
-                "_shards": {"successful": 5, "total": 5},
-                "hits": {"hits": [{"search_data": 1}]},
-            },
-            meta=None,
+    with (
+        patch.object(sync_client, "options", return_value=sync_client),
+        patch.object(
+            sync_client,
+            "search",
+            return_value=ObjectApiResponse(
+                raw={
+                    "_scroll_id": "dummy_id",
+                    "_shards": {"successful": 5, "total": 5},
+                    "hits": {"hits": [{"search_data": 1}]},
+                },
+                meta=None,
+            ),
         ),
-    ), patch.object(sync_client, "scroll") as scroll_mock, patch.object(
-        sync_client, "clear_scroll"
+        patch.object(sync_client, "scroll") as scroll_mock,
+        patch.object(sync_client, "clear_scroll"),
     ):
         scroll_mock.side_effect = [
             ObjectApiResponse(
@@ -841,18 +908,22 @@ def test_shards_no_skipped_field(sync_cl
     ],
 )
 def test_scan_from_keyword_is_aliased(sync_client, scan_kwargs):
-    with patch.object(sync_client, "options", return_value=sync_client), patch.object(
-        sync_client,
-        "search",
-        return_value=ObjectApiResponse(
-            raw={
-                "_scroll_id": "dummy_id",
-                "_shards": {"successful": 5, "total": 5},
-                "hits": {"hits": []},
-            },
-            meta=None,
-        ),
-    ) as search_mock, patch.object(sync_client, "clear_scroll"):
+    with (
+        patch.object(sync_client, "options", return_value=sync_client),
+        patch.object(
+            sync_client,
+            "search",
+            return_value=ObjectApiResponse(
+                raw={
+                    "_scroll_id": "dummy_id",
+                    "_shards": {"successful": 5, "total": 5},
+                    "hits": {"hits": []},
+                },
+                meta=None,
+            ),
+        ) as search_mock,
+        patch.object(sync_client, "clear_scroll"),
+    ):
         list(helpers.scan(sync_client, index="test_index", **scan_kwargs))
         assert search_mock.call_args[1]["from_"] == 1
         assert "from" not in search_mock.call_args[1]
diff -pruN 8.17.2-2/test_elasticsearch/test_server/test_otel.py 9.2.0-1/test_elasticsearch/test_server/test_otel.py
--- 8.17.2-2/test_elasticsearch/test_server/test_otel.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_server/test_otel.py	2025-10-28 16:50:53.000000000 +0000
@@ -44,9 +44,9 @@ def test_otel_end_to_end(sync_client):
     assert spans[0].name == "search"
     expected_attributes = {
         "http.request.method": "POST",
-        "db.system": "elasticsearch",
-        "db.operation": "search",
-        "db.elasticsearch.path_parts.index": "logs-*",
+        "db.system.name": "elasticsearch",
+        "db.operation.name": "search",
+        "db.operation.parameter.index": "logs-*",
     }
     # Assert expected attributes are here, but allow other attributes too
     # to make this test robust to elastic-transport changes
@@ -89,8 +89,8 @@ def test_otel_bulk(sync_client, elastics
     parent_span = spans.pop()
     assert parent_span.name == f"helpers.{bulk_helper_name}"
     assert parent_span.attributes == {
-        "db.system": "elasticsearch",
-        "db.operation": f"helpers.{bulk_helper_name}",
+        "db.system.name": "elasticsearch",
+        "db.operation.name": f"helpers.{bulk_helper_name}",
         "http.request.method": "null",
     }
 
@@ -99,9 +99,9 @@ def test_otel_bulk(sync_client, elastics
         assert span.name == "bulk"
         expected_attributes = {
             "http.request.method": "PUT",
-            "db.system": "elasticsearch",
-            "db.operation": "bulk",
-            "db.elasticsearch.path_parts.index": "test-index",
+            "db.system.name": "elasticsearch",
+            "db.operation.name": "bulk",
+            "db.operation.parameter.index": "test-index",
         }
         # Assert expected attributes are here, but allow other attributes too
         # to make this test robust to elastic-transport changes
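Both OpenTelemetry test modules move from the old db.* attribute keys (db.system, db.operation, db.elasticsearch.path_parts.*) to the current database semantic conventions (db.system.name, db.operation.name, db.operation.parameter.*, db.namespace). A hedged sketch of observing such attributes with the SDK's in-memory exporter; this only shows one way to inspect spans in a test, not how the client wires its own instrumentation:

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))
    tracer = provider.get_tracer("example")

    with tracer.start_as_current_span("search") as span:
        span.set_attribute("db.system.name", "elasticsearch")
        span.set_attribute("db.operation.name", "search")

    (span,) = exporter.get_finished_spans()
    assert span.attributes["db.system.name"] == "elasticsearch"
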
diff -pruN 8.17.2-2/test_elasticsearch/test_server/test_rest_api_spec.py 9.2.0-1/test_elasticsearch/test_server/test_rest_api_spec.py
--- 8.17.2-2/test_elasticsearch/test_server/test_rest_api_spec.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_server/test_rest_api_spec.py	2025-10-28 16:50:53.000000000 +0000
@@ -20,6 +20,7 @@ Dynamically generated set of TestCases b
 some integration tests. These files are shared among all official Elasticsearch
 clients.
 """
+import copy
 import io
 import json
 import os
@@ -78,6 +79,7 @@ FAILING_TESTS = {
     "cluster/voting_config_exclusions",
     "entsearch/10_basic",
     "indices/clone",
+    "indices/data_stream_mappings[0]",
     "indices/resolve_cluster",
     "indices/settings",
     "indices/split",
@@ -116,6 +118,7 @@ class YamlRunner:
         self._state = {}
 
     def use_spec(self, test_spec):
+        test_spec = copy.deepcopy(test_spec)
         self._setup_code = test_spec.pop("setup", None)
         self._run_code = test_spec.pop("run", None)
         self._teardown_code = test_spec.pop("teardown", None)
@@ -280,8 +283,6 @@ class YamlRunner:
         self.last_response = exception.body
 
     def run_skip(self, skip):
-        global IMPLEMENTED_FEATURES
-
         if "features" in skip:
             features = skip["features"]
             if not isinstance(features, (tuple, list)):
@@ -437,7 +438,7 @@ class YamlRunner:
         return value
 
     def _feature_enabled(self, name):
-        global XPACK_FEATURES, IMPLEMENTED_FEATURES
+        global XPACK_FEATURES
         if XPACK_FEATURES is None:
             try:
                 xinfo = self.client.xpack.info()
@@ -496,7 +497,7 @@ YAML_TEST_SPECS = []
 # Try loading the REST API test specs from the Elastic Artifacts API
 try:
     # Construct the HTTP and Elasticsearch client
-    http = urllib3.PoolManager(retries=10)
+    http = urllib3.PoolManager(retries=urllib3.Retry(total=10))
 
     yaml_tests_url = (
         "https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/main"
diff -pruN 8.17.2-2/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py 9.2.0-1/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py
--- 8.17.2-2/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py	2025-10-28 16:50:53.000000000 +0000
@@ -485,7 +485,7 @@ class TestVectorStore:
             )
             store.add_texts(texts)
 
-            ## without fetch_k parameter
+            # without fetch_k parameter
             output = store.search(
                 query="foo",
                 k=3,
@@ -551,7 +551,7 @@ class TestVectorStore:
         )
         store.add_texts(texts)
 
-        ## with fetch_k parameter
+        # with fetch_k parameter
         output = store.search(
             query="foo",
             k=3,
diff -pruN 8.17.2-2/test_elasticsearch/test_transport.py 9.2.0-1/test_elasticsearch/test_transport.py
--- 8.17.2-2/test_elasticsearch/test_transport.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_transport.py	2025-10-28 16:50:53.000000000 +0000
@@ -18,7 +18,7 @@
 import re
 import time
 import warnings
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Optional
 
 import pytest
 from elastic_transport import (
@@ -38,7 +38,6 @@ from elasticsearch.exceptions import (
     ElasticsearchWarning,
     UnsupportedProductError,
 )
-from elasticsearch.transport import get_host_info
 
 
 class DummyNode(BaseNode):
@@ -167,23 +166,6 @@ CLUSTER_NODES_MASTER_ONLY = """{
 }"""
 
 
-class TestHostsInfoCallback:
-    def test_master_only_nodes_are_ignored(self):
-        nodes = [
-            {"roles": ["master"]},
-            {"roles": ["master", "data", "ingest"]},
-            {"roles": ["data", "ingest"]},
-            {"roles": []},
-            {},
-        ]
-        chosen = [
-            i
-            for i, node_info in enumerate(nodes)
-            if get_host_info(node_info, i) is not None
-        ]
-        assert [1, 2, 3, 4] == chosen
-
-
 class TestTransport:
     def test_request_timeout_extracted_from_params_and_passed(self):
         client = Elasticsearch(
@@ -285,7 +267,7 @@ class TestTransport:
         calls = client.transport.node_pool.get().calls
         assert 1 == len(calls)
         assert calls[0][1]["headers"] == {
-            "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
         }
 
     def test_meta_header_type_error(self):
@@ -446,7 +428,7 @@ class TestTransport:
             {
                 "body": None,
                 "headers": {
-                    "accept": "application/vnd.elasticsearch+json; compatible-with=8"
+                    "accept": "application/vnd.elasticsearch+json; compatible-with=9"
                 },
                 "request_timeout": None,  # <-- Should be None instead of 12
             },
@@ -470,7 +452,7 @@ class TestTransport:
             {
                 "body": None,
                 "headers": {
-                    "accept": "application/vnd.elasticsearch+json; compatible-with=8"
+                    "accept": "application/vnd.elasticsearch+json; compatible-with=9"
                 },
                 "request_timeout": 12,
             },
@@ -480,7 +462,7 @@ class TestTransport:
             {
                 "body": None,
                 "headers": {
-                    "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                    "accept": "application/vnd.elasticsearch+json; compatible-with=9",
                 },
                 "request_timeout": DEFAULT,
             },
@@ -525,10 +507,8 @@ class TestTransport:
         "kwargs",
         [
             {"sniff_on_start": True},
-            {"sniff_on_connection_fail": True},
             {"sniff_on_node_failure": True},
             {"sniff_before_requests": True},
-            {"sniffer_timeout": 1},
             {"sniff_timeout": 1},
         ],
     )
@@ -589,41 +569,6 @@ class TestTransport:
         ports = {node.config.port for node in client.transport.node_pool.all()}
         assert ports == {9200, 124}
 
-    def test_sniffing_deprecated_host_info_callback(self):
-        def host_info_callback(
-            node_info: Dict[str, Any], host: Dict[str, Union[int, str]]
-        ) -> Dict[str, Any]:
-            return (
-                host if node_info["http"]["publish_address"].endswith(":124") else None
-            )
-
-        with warnings.catch_warnings(record=True) as w:
-            client = Elasticsearch(  # noqa: F821
-                [
-                    NodeConfig(
-                        "http",
-                        "localhost",
-                        9200,
-                        _extras={"data": CLUSTER_NODES_MASTER_ONLY},
-                    )
-                ],
-                node_class=DummyNode,
-                sniff_on_start=True,
-                host_info_callback=host_info_callback,
-            )
-
-        assert len(w) == 1
-        assert w[0].category == DeprecationWarning
-        assert (
-            str(w[0].message)
-            == "The 'host_info_callback' parameter is deprecated in favor of 'sniffed_node_callback'"
-        )
-
-        assert len(client.transport.node_pool) == 2
-
-        ports = {node.config.port for node in client.transport.node_pool.all()}
-        assert ports == {9200, 124}
-
 
 @pytest.mark.parametrize("headers", [{}, {"X-elastic-product": "BAD HEADER"}])
 def test_unsupported_product_error(headers):
@@ -647,7 +592,7 @@ def test_unsupported_product_error(heade
         {
             "body": None,
             "headers": {
-                "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
             },
             "request_timeout": DEFAULT,
         },
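The removed tests covered the deprecated sniffer_timeout, sniff_on_connection_fail and host_info_callback options; the deprecation message they asserted pointed at sniffed_node_callback as the replacement. A sketch of that replacement, with the callback signature assumed from the elastic-transport 8.x documentation rather than from this diff:

    from typing import Any, Dict, Optional

    from elastic_transport import NodeConfig
    from elasticsearch import Elasticsearch

    def keep_non_master_only(node_info: Dict[str, Any], node_config: NodeConfig) -> Optional[NodeConfig]:
        # return the config to keep a sniffed node, or None to drop it
        return None if node_info.get("roles") == ["master"] else node_config

    client = Elasticsearch(
        "http://localhost:9200",
        sniff_on_start=True,
        sniffed_node_callback=keep_non_master_only,
    )
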
diff -pruN 8.17.2-2/test_elasticsearch/test_types/aliased_types.py 9.2.0-1/test_elasticsearch/test_types/aliased_types.py
--- 8.17.2-2/test_elasticsearch/test_types/aliased_types.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_types/aliased_types.py	2025-10-28 16:50:53.000000000 +0000
@@ -34,9 +34,7 @@ es = Elasticsearch(
     [{"host": "localhost", "port": 9443}],
     transport_class=Transport,
     sniff_on_start=True,
-    sniffer_timeout=0.1,
     sniff_timeout=1,
-    sniff_on_connection_fail=False,
     max_retries=1,
     retry_on_status={100, 400, 503},
     retry_on_timeout=True,
@@ -103,9 +101,7 @@ def reindex_types() -> None:
 es2 = AsyncElasticsearch(
     [{"host": "localhost", "port": 9443}],
     sniff_on_start=True,
-    sniffer_timeout=0.1,
     sniff_timeout=1,
-    sniff_on_connection_fail=False,
     max_retries=1,
     retry_on_status={100, 400, 503},
     retry_on_timeout=True,
diff -pruN 8.17.2-2/test_elasticsearch/test_types/async_types.py 9.2.0-1/test_elasticsearch/test_types/async_types.py
--- 8.17.2-2/test_elasticsearch/test_types/async_types.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_types/async_types.py	2025-10-28 16:50:53.000000000 +0000
@@ -28,9 +28,7 @@ from elasticsearch.helpers import (
 es = AsyncElasticsearch(
     [{"host": "localhost", "port": 9443}],
     sniff_on_start=True,
-    sniffer_timeout=0.1,
     sniff_timeout=1,
-    sniff_on_connection_fail=False,
     max_retries=1,
     retry_on_status={100, 400, 503},
     retry_on_timeout=True,
diff -pruN 8.17.2-2/test_elasticsearch/test_types/sync_types.py 9.2.0-1/test_elasticsearch/test_types/sync_types.py
--- 8.17.2-2/test_elasticsearch/test_types/sync_types.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/test_types/sync_types.py	2025-10-28 16:50:53.000000000 +0000
@@ -23,9 +23,7 @@ from elasticsearch.helpers import bulk,
 es = Elasticsearch(
     [{"host": "localhost", "port": 9443}],
     sniff_on_start=True,
-    sniffer_timeout=0.1,
     sniff_timeout=1,
-    sniff_on_connection_fail=False,
     max_retries=1,
     retry_on_status={100, 400, 503},
     retry_on_timeout=True,
diff -pruN 8.17.2-2/test_elasticsearch/utils.py 9.2.0-1/test_elasticsearch/utils.py
--- 8.17.2-2/test_elasticsearch/utils.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/test_elasticsearch/utils.py	2025-10-28 16:50:53.000000000 +0000
@@ -22,10 +22,8 @@ from pathlib import Path
 from typing import Optional, Tuple
 
 from elasticsearch import (
-    AuthorizationException,
     ConnectionError,
     Elasticsearch,
-    NotFoundError,
 )
 
 SOURCE_DIR = Path(__file__).absolute().parent.parent
@@ -118,40 +116,15 @@ def wipe_cluster(client):
     except ImportError:
         pass
 
-    is_xpack = True
-    if is_xpack:
-        wipe_rollup_jobs(client)
-        wait_for_pending_tasks(client, filter="xpack/rollup/job")
-        wipe_slm_policies(client)
-
-        # Searchable snapshot indices start in 7.8+
-        if es_version(client) >= (7, 8):
-            wipe_searchable_snapshot_indices(client)
-
     wipe_snapshots(client)
-    if is_xpack:
-        wipe_data_streams(client)
+    wipe_data_streams(client)
     wipe_indices(client)
 
-    if is_xpack:
-        wipe_xpack_templates(client)
-    else:
-        client.indices.delete_template(name="*")
-        client.indices.delete_index_template(name="*")
-        client.cluster.delete_component_template(name="*")
+    client.indices.delete_template(name="*")
+    client.indices.delete_index_template(name="*")
 
     wipe_cluster_settings(client)
 
-    if is_xpack:
-        wipe_ilm_policies(client)
-        wipe_auto_follow_patterns(client)
-        wipe_tasks(client)
-        wipe_node_shutdown_metadata(client)
-        wait_for_pending_datafeeds_and_jobs(client)
-        wipe_calendars(client)
-        wipe_filters(client)
-        wipe_transforms(client)
-
     wait_for_cluster_state_updates_to_finish(client)
     if close_after_wipe:
         client.close()
@@ -169,16 +142,6 @@ def wipe_cluster_settings(client):
         client.cluster.put_settings(body=new_settings)
 
 
-def wipe_rollup_jobs(client):
-    rollup_jobs = client.rollup.get_jobs(id="_all").get("jobs", ())
-    for job in rollup_jobs:
-        job_id = job["config"]["id"]
-        client.options(ignore_status=404).rollup.stop_job(
-            id=job_id, wait_for_completion=True
-        )
-        client.options(ignore_status=404).rollup.delete_job(id=job_id)
-
-
 def wipe_snapshots(client):
     """Deletes all the snapshots and repositories from the cluster"""
     in_progress_snapshots = []
@@ -216,266 +179,15 @@ def wipe_data_streams(client):
 def wipe_indices(client):
     indices = client.cat.indices().strip().splitlines()
     if len(indices) > 0:
-        index_names = [i.split(" ")[2] for i in indices]
+        index_names = [i.split()[2] for i in indices]
         client.options(ignore_status=404).indices.delete(
             index=",".join(index_names),
             expand_wildcards="all",
         )
 
 
-def wipe_searchable_snapshot_indices(client):
-    cluster_metadata = client.cluster.state(
-        metric="metadata",
-        filter_path="metadata.indices.*.settings.index.store.snapshot",
-    )
-    if cluster_metadata:
-        for index in cluster_metadata["metadata"]["indices"].keys():
-            client.indices.delete(index=index)
-
-
-def wipe_xpack_templates(client):
-    # Delete index templates (including legacy)
-    templates = [
-        x.strip() for x in client.cat.templates(h="name").split("\n") if x.strip()
-    ]
-    for template in templates:
-        if is_xpack_template(template):
-            continue
-        try:
-            client.indices.delete_template(name=template)
-        except NotFoundError as e:
-            if f"index_template [{template}] missing" in str(e):
-                client.indices.delete_index_template(name=template)
-
-    # Delete component templates
-    templates = client.cluster.get_component_template()["component_templates"]
-    templates_to_delete = [
-        template["name"]
-        for template in templates
-        if not is_xpack_template(template["name"])
-    ]
-    if templates_to_delete:
-        client.cluster.delete_component_template(name=",".join(templates_to_delete))
-
-
-def wipe_ilm_policies(client):
-    for policy in client.ilm.get_lifecycle():
-        if (
-            policy
-            not in {
-                "ilm-history-ilm-policy",
-                "slm-history-ilm-policy",
-                "watch-history-ilm-policy",
-                "watch-history-ilm-policy-16",
-                "ml-size-based-ilm-policy",
-                "logs",
-                "metrics",
-                "synthetics",
-                "7-days-default",
-                "30-days-default",
-                "90-days-default",
-                "180-days-default",
-                "365-days-default",
-                ".fleet-actions-results-ilm-policy",
-                ".deprecation-indexing-ilm-policy",
-                ".monitoring-8-ilm-policy",
-            }
-            and "-history-ilm-polcy" not in policy
-            and "-meta-ilm-policy" not in policy
-            and "-data-ilm-policy" not in policy
-            and "@lifecycle" not in policy
-        ):
-            client.ilm.delete_lifecycle(name=policy)
-
-
-def wipe_slm_policies(client):
-    policies = client.slm.get_lifecycle()
-    for policy in policies:
-        if policy not in {"cloud-snapshot-policy"}:
-            client.slm.delete_lifecycle(policy_id=policy)
-
-
-def wipe_auto_follow_patterns(client):
-    for pattern in client.ccr.get_auto_follow_pattern()["patterns"]:
-        client.ccr.delete_auto_follow_pattern(name=pattern["name"])
-
-
-def wipe_node_shutdown_metadata(client):
-    try:
-        shutdown_status = client.shutdown.get_node()
-        # If response contains these two keys the feature flag isn't enabled
-        # on this cluster so skip this step now.
-        if "_nodes" in shutdown_status and "cluster_name" in shutdown_status:
-            return
-
-        for shutdown_node in shutdown_status.get("nodes", []):
-            node_id = shutdown_node["node_id"]
-            client.shutdown.delete_node(node_id=node_id)
-
-    # Elastic Cloud doesn't allow this so we skip.
-    except AuthorizationException:
-        pass
-
-
-def wipe_tasks(client):
-    tasks = client.tasks.list()
-    for node_name, node in tasks.get("node", {}).items():
-        for task_id in node.get("tasks", ()):
-            client.tasks.cancel(task_id=task_id, wait_for_completion=True)
-
-
-def wait_for_pending_tasks(client, filter, timeout=30):
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        tasks = client.cat.tasks(detailed=True).split("\n")
-        if not any(filter in task for task in tasks):
-            break
-
-
-def wait_for_pending_datafeeds_and_jobs(client: Elasticsearch, timeout=30):
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        resp = client.ml.get_datafeeds(datafeed_id="*", allow_no_match=True)
-        if resp["count"] == 0:
-            break
-        for datafeed in resp["datafeeds"]:
-            client.options(ignore_status=404).ml.delete_datafeed(
-                datafeed_id=datafeed["datafeed_id"]
-            )
-
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        resp = client.ml.get_jobs(job_id="*", allow_no_match=True)
-        if resp["count"] == 0:
-            break
-        for job in resp["jobs"]:
-            client.options(ignore_status=404).ml.close_job(job_id=job["job_id"])
-            client.options(ignore_status=404).ml.delete_job(job_id=job["job_id"])
-
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        resp = client.ml.get_data_frame_analytics(id="*")
-        if resp["count"] == 0:
-            break
-        for job in resp["data_frame_analytics"]:
-            client.options(ignore_status=404).ml.stop_data_frame_analytics(id=job["id"])
-            client.options(ignore_status=404).ml.delete_data_frame_analytics(
-                id=job["id"]
-            )
-
-
-def wipe_filters(client: Elasticsearch, timeout=30):
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        resp = client.ml.get_filters(filter_id="*")
-        if resp["count"] == 0:
-            break
-        for filter in resp["filters"]:
-            client.options(ignore_status=404).ml.delete_filter(
-                filter_id=filter["filter_id"]
-            )
-
-
-def wipe_calendars(client: Elasticsearch, timeout=30):
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        resp = client.ml.get_calendars(calendar_id="*")
-        if resp["count"] == 0:
-            break
-        for calendar in resp["calendars"]:
-            client.options(ignore_status=404).ml.delete_calendar(
-                calendar_id=calendar["calendar_id"]
-            )
-
-
-def wipe_transforms(client: Elasticsearch, timeout=30):
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        resp = client.transform.get_transform(transform_id="*")
-        if resp["count"] == 0:
-            break
-        for trasnform in resp["transforms"]:
-            client.options(ignore_status=404).transform.stop_transform(
-                transform_id=trasnform["id"]
-            )
-            client.options(ignore_status=404).transform.delete_transform(
-                transform_id=trasnform["id"]
-            )
-
-
 def wait_for_cluster_state_updates_to_finish(client, timeout=30):
     end_time = time.time() + timeout
     while time.time() < end_time:
         if not client.cluster.pending_tasks().get("tasks", ()):
             break
-
-
-def is_xpack_template(name):
-    if name.startswith("."):
-        return True
-    elif name.startswith("behavioral_analytics-events"):
-        return True
-    elif name.startswith("elastic-connectors-"):
-        return True
-    elif name.startswith("entities_v1_"):
-        return True
-    elif name.endswith("@ilm"):
-        return True
-    elif name.endswith("@template"):
-        return True
-
-    return name in {
-        "agentless",
-        "agentless@mappings",
-        "agentless@settings",
-        "apm-10d@lifecycle",
-        "apm-180d@lifecycle",
-        "apm-390d@lifecycle",
-        "apm-90d@lifecycle",
-        "apm@mappings",
-        "apm@settings",
-        "data-streams-mappings",
-        "data-streams@mappings",
-        "elastic-connectors",
-        "ecs@dynamic_templates",
-        "ecs@mappings",
-        "ilm-history-7",
-        "kibana-reporting@settings",
-        "logs",
-        "logs-apm.error@mappings",
-        "logs-apm@settings",
-        "logs-mappings",
-        "logs@mappings",
-        "logs-settings",
-        "logs@settings",
-        "metrics",
-        "metrics-apm@mappings",
-        "metrics-apm.service_destination@mappings",
-        "metrics-apm.service_summary@mappings",
-        "metrics-apm.service_transaction@mappings",
-        "metrics-apm@settings",
-        "metrics-apm.transaction@mappings",
-        "metrics-mappings",
-        "metrics@mappings",
-        "metrics-settings",
-        "metrics@settings",
-        "metrics-tsdb-settings",
-        "metrics@tsdb-settings",
-        "search-acl-filter",
-        "synthetics",
-        "synthetics-mappings",
-        "synthetics@mappings",
-        "synthetics-settings",
-        "synthetics@settings",
-        "traces-apm@mappings",
-        "traces-apm.rum@mappings",
-        "traces@mappings",
-        "traces@settings",
-        # otel
-        "metrics-otel@mappings",
-        "semconv-resource-to-ecs@mappings",
-        "traces-otel@mappings",
-        "ecs-tsdb@mappings",
-        "logs-otel@mappings",
-        "otel@mappings",
-    }
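One behavioural detail in wipe_indices above: the index name is now taken with str.split() instead of split(" "), because the cat.indices text rows may pad columns with more than one space. A made-up row shows the difference:

    row = "green  open  test-index"
    row.split(" ")   # ['green', '', 'open', '', 'test-index'] -> [2] is 'open'
    row.split()      # ['green', 'open', 'test-index']         -> [2] is 'test-index'
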
diff -pruN 8.17.2-2/utils/build-dists.py 9.2.0-1/utils/build-dists.py
--- 8.17.2-2/utils/build-dists.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/utils/build-dists.py	2025-10-28 16:50:53.000000000 +0000
@@ -42,7 +42,6 @@ def set_tmp_dir():
 
 
 def run(*argv, expect_exit_code=0):
-    global tmp_dir
     try:
         prev_dir = os.getcwd()
         if tmp_dir is None:
@@ -122,6 +121,7 @@ def test_dist(dist):
                 "--install-types",
                 "--non-interactive",
                 "--ignore-missing-imports",
+                "--implicit-reexport",
                 os.path.join(base_dir, "test_elasticsearch/test_types/async_types.py"),
             )
 
@@ -146,6 +146,7 @@ def test_dist(dist):
                 "--install-types",
                 "--non-interactive",
                 "--ignore-missing-imports",
+                "--implicit-reexport",
                 os.path.join(base_dir, "test_elasticsearch/test_types/sync_types.py"),
             )
         else:
@@ -157,6 +158,7 @@ def test_dist(dist):
                 "--install-types",
                 "--non-interactive",
                 "--ignore-missing-imports",
+                "--implicit-reexport",
                 os.path.join(
                     base_dir, "test_elasticsearch/test_types/aliased_types.py"
                 ),
diff -pruN 8.17.2-2/utils/dsl-generator.py 9.2.0-1/utils/dsl-generator.py
--- 8.17.2-2/utils/dsl-generator.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/dsl-generator.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,1023 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import json
+import re
+import subprocess
+import textwrap
+from urllib.error import HTTPError
+from urllib.request import urlopen
+
+from jinja2 import Environment, PackageLoader, select_autoescape
+
+jinja_env = Environment(
+    loader=PackageLoader("utils"),
+    autoescape=select_autoescape(),
+    trim_blocks=True,
+    lstrip_blocks=True,
+)
+field_py = jinja_env.get_template("field.py.tpl")
+query_py = jinja_env.get_template("query.py.tpl")
+aggs_py = jinja_env.get_template("aggs.py.tpl")
+response_init_py = jinja_env.get_template("response.__init__.py.tpl")
+types_py = jinja_env.get_template("types.py.tpl")
+
+# map with name replacements for Elasticsearch attributes
+PROP_REPLACEMENTS = {"from": "from_", "global": "global_"}
+
+# map with Elasticsearch type replacements
+# keys and values are given in "{namespace}:{name}" format
+TYPE_REPLACEMENTS = {
+    "_types.query_dsl:DistanceFeatureQuery": "_types.query_dsl:DistanceFeatureQueryBase",
+}
+
+# some aggregation types are complicated to determine from the schema, so they
+# have their correct type here
+AGG_TYPES = {
+    "bucket_count_ks_test": "Pipeline",
+    "bucket_correlation": "Pipeline",
+    "bucket_sort": "Bucket",
+    "categorize_text": "Bucket",
+    "filter": "Bucket",
+    "moving_avg": "Pipeline",
+    "variable_width_histogram": "Bucket",
+}
+
+
+def property_to_class_name(name):
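+    # e.g. "geo_ip_range" -> "GeoIPRange" (illustrative input, not from the schema)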
+    return "".join([w.title() if w != "ip" else "IP" for w in name.split("_")])
+
+
+def wrapped_doc(text, width=70, initial_indent="", subsequent_indent=""):
+    """Formats a docstring as a list of lines of up to the request width."""
+    return textwrap.wrap(
+        text.replace("\n", " "),
+        width=width,
+        initial_indent=initial_indent,
+        subsequent_indent=subsequent_indent,
+    )
+
+
+def add_dict_type(type_):
+    """Add Dict[str, Any] to a Python type hint."""
+    if type_.startswith("Union["):
+        type_ = f"{type_[:-1]}, Dict[str, Any]]"
+    else:
+        type_ = f"Union[{type_}, Dict[str, Any]]"
+    return type_
+
+
+def add_seq_dict_type(type_):
+    """Add Sequence[Dict[str, Any]] to a Python type hint."""
+    if type_.startswith("Union["):
+        type_ = f"{type_[:-1]}, Sequence[Dict[str, Any]]]"
+    else:
+        type_ = f"Union[{type_}, Sequence[Dict[str, Any]]]"
+    return type_
+
+
+def add_not_set(type_):
+    """Add DefaultType to a Python type hint."""
+    if type_.startswith("Union["):
+        type_ = f'{type_[:-1]}, "DefaultType"]'
+    else:
+        type_ = f'Union[{type_}, "DefaultType"]'
+    return type_
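+
+
+# Illustrative composition of the helpers above (documentation only; the input
+# string is an assumed example, nothing here is emitted by the generator itself):
+#   add_dict_type('"types.SortOptions"')
+#       -> 'Union["types.SortOptions", Dict[str, Any]]'
+#   add_not_set('Union["types.SortOptions", Dict[str, Any]]')
+#       -> 'Union["types.SortOptions", Dict[str, Any], "DefaultType"]'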
+
+
+def type_for_types_py(type_):
+    """Converts a type rendered in a generic way to the format needed in the
+    types.py module.
+    """
+    type_ = type_.replace('"DefaultType"', "DefaultType")
+    type_ = type_.replace('"InstrumentedField"', "InstrumentedField")
+    type_ = re.sub(r'"(function\.[a-zA-Z0-9_]+)"', r"\1", type_)
+    type_ = re.sub(r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', type_)
+    type_ = re.sub(r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", type_)
+    return type_
+
+
+class ElasticsearchSchema:
+    """Operations related to the Elasticsearch schema."""
+
+    def __init__(self, version="main"):
+        response = None
+        for branch in [version, "main"]:
+            url = f"https://raw.githubusercontent.com/elastic/elasticsearch-specification/{branch}/output/schema/schema.json"
+            try:
+                response = urlopen(url)
+                print(f"Initializing code generation with '{branch}' specification.")
+                break
+            except HTTPError:
+                continue
+        if not response:
+            raise RuntimeError("Could not download Elasticsearch schema")
+        self.schema = json.loads(response.read())
+
+        # self.interfaces collects the interfaces seen while traversing the schema.
+        # Any interfaces collected here are then rendered as Python in the
+        # types.py module.
+        self.interfaces = []
+        self.response_interfaces = []
+
+    def find_type(self, name, namespace=None):
+        for t in self.schema["types"]:
+            if t["name"]["name"] == name and (
+                namespace is None or t["name"]["namespace"] == namespace
+            ):
+                return t
+
+    def inherits_from(self, type_, name, namespace=None):
+        while "inherits" in type_:
+            type_ = self.find_type(
+                type_["inherits"]["type"]["name"],
+                type_["inherits"]["type"]["namespace"],
+            )
+            if type_["name"]["name"] == name and (
+                namespace is None or type_["name"]["namespace"] == namespace
+            ):
+                return True
+        return False
+
+    def get_python_type(self, schema_type, for_response=False):
+        """Obtain Python typing details for a given schema type
+
+        This method returns a tuple. The first element is a string with the
+        Python type hint. The second element is a dictionary with Python DSL
+        specific typing details to be stored in the DslBase._param_defs
+        attribute (or None if the type does not need to be in _param_defs).
+
+        When `for_response` is `False`, any new interfaces that are discovered
+        are registered to be generated in "request" style, with alternative
+        Dict type hints and default values. If `for_response` is `True`,
+        interfaces are generated just with their declared type, without
+        Dict alternative and without defaults, to help type checkers be more
+        effective at parsing response expressions.
+        """
+        if schema_type["kind"] == "instance_of":
+            type_name = schema_type["type"]
+            if type_name["namespace"] in ["_types", "internal", "_builtins"]:
+                if type_name["name"] in ["integer", "uint", "long", "ulong"]:
+                    return "int", None
+                elif type_name["name"] in ["number", "float", "double"]:
+                    return "float", None
+                elif type_name["name"] == "string":
+                    return "str", None
+                elif type_name["name"] == "boolean":
+                    return "bool", None
+                elif type_name["name"] == "binary":
+                    return "bytes", None
+                elif type_name["name"] == "null":
+                    return "None", None
+                elif type_name["name"] == "Field":
+                    if for_response:
+                        return "str", None
+                    else:
+                        return 'Union[str, "InstrumentedField"]', None
+                else:
+                    # not an instance of a native type, so we get the type and try again
+                    return self.get_python_type(
+                        self.find_type(type_name["name"], type_name["namespace"]),
+                        for_response=for_response,
+                    )
+            elif (
+                type_name["namespace"] == "_types.query_dsl"
+                and type_name["name"] == "QueryContainer"
+            ):
+                # QueryContainer maps to the DSL's Query class
+                return "Query", {"type": "query"}
+            elif (
+                type_name["namespace"] == "_global.search._types"
+                and type_name["name"] == "SearchRequestBody"
+            ):
+                # we currently do not provide specific typing for this one
+                return "Dict[str, Any]", None
+            elif (
+                type_name["namespace"] == "_types.query_dsl"
+                and type_name["name"] == "FunctionScoreContainer"
+            ):
+                # FunctionScoreContainer maps to the DSL's ScoreFunction class
+                return "ScoreFunction", {"type": "score_function"}
+            elif (
+                type_name["namespace"] == "_types.aggregations"
+                and type_name["name"] == "Buckets"
+            ):
+                if for_response:
+                    return "Union[Sequence[Any], Dict[str, Any]]", None
+                else:
+                    return "Dict[str, Query]", {"type": "query", "hash": True}
+            elif (
+                type_name["namespace"] == "_types.aggregations"
+                and type_name["name"] == "CompositeAggregationSource"
+            ):
+                # CompositeAggregationSource maps to the DSL's Agg class
+                return "Agg[_R]", None
+            else:
+                # for any other instances we get the type and recurse
+                type_ = self.find_type(type_name["name"], type_name["namespace"])
+                if type_:
+                    return self.get_python_type(type_, for_response=for_response)
+
+        elif schema_type["kind"] == "type_alias":
+            # for an alias, we use the aliased type
+            return self.get_python_type(schema_type["type"], for_response=for_response)
+
+        elif schema_type["kind"] == "array_of":
+            # for arrays we use Sequence[element_type]
+            type_, param = self.get_python_type(
+                schema_type["value"], for_response=for_response
+            )
+            return f"Sequence[{type_}]", {**param, "multi": True} if param else None
+
+        elif schema_type["kind"] == "dictionary_of":
+            # for dicts we use Mapping[key_type, value_type]
+            key_type, key_param = self.get_python_type(
+                schema_type["key"], for_response=for_response
+            )
+            value_type, value_param = self.get_python_type(
+                schema_type["value"], for_response=for_response
+            )
+            return f"Mapping[{key_type}, {value_type}]", (
+                {**value_param, "hash": True} if value_param else None
+            )
+
+        elif schema_type["kind"] == "union_of":
+            if (
+                len(schema_type["items"]) == 2
+                and schema_type["items"][0]["kind"] == "instance_of"
+                and schema_type["items"][1]["kind"] == "array_of"
+                and schema_type["items"][0] == schema_type["items"][1]["value"]
+            ):
+                # special kind of unions in the form Union[type, Sequence[type]]
+                type_, param = self.get_python_type(
+                    schema_type["items"][0], for_response=for_response
+                )
+                if schema_type["items"][0]["type"]["name"] in [
+                    "CompletionSuggestOption",
+                    "PhraseSuggestOption",
+                    "TermSuggestOption",
+                ]:
+                    # for suggest types we simplify this type and return just the array form
+                    return (
+                        f"Sequence[{type_}]",
+                        ({"type": param["type"], "multi": True} if param else None),
+                    )
+                else:
+                    # for all other types we produce a union of the two alternatives
+                    return (
+                        f"Union[{type_}, Sequence[{type_}]]",
+                        ({"type": param["type"], "multi": True} if param else None),
+                    )
+            elif (
+                len(schema_type["items"]) == 2
+                and schema_type["items"][0]["kind"] == "instance_of"
+                and schema_type["items"][1]["kind"] == "instance_of"
+                and schema_type["items"][0]["type"]
+                == {"name": "T", "namespace": "_spec_utils.PipeSeparatedFlags"}
+                and schema_type["items"][1]["type"]
+                == {"name": "string", "namespace": "_builtins"}
+            ):
+                # for now we treat PipeSeparatedFlags as a special case
+                if "PipeSeparatedFlags" not in self.interfaces:
+                    self.interfaces.append("PipeSeparatedFlags")
+                return '"types.PipeSeparatedFlags"', None
+            else:
+                # generic union type
+                types = list(
+                    dict.fromkeys(  # eliminate duplicates
+                        [
+                            self.get_python_type(t, for_response=for_response)
+                            for t in schema_type["items"]
+                        ]
+                    )
+                )
+                if len(types) == 1:
+                    return types[0]
+                return "Union[" + ", ".join([type_ for type_, _ in types]) + "]", None
+
+        elif schema_type["kind"] == "enum":
+            # enums are mapped to Literal[member, ...]
+            t = (
+                "Literal["
+                + ", ".join(
+                    [f"\"{member['name']}\"" for member in schema_type["members"]]
+                )
+                + "]"
+            )
+            if {"name": "true"} in schema_type["members"] and {
+                "name": "false"
+            } in schema_type["members"]:
+                # this is a boolean that was later upgraded to an enum, so we
+                # should also allow bools
+                t = f"Union[{t}, bool]"
+            return t, None
+
+        elif schema_type["kind"] == "interface":
+            if schema_type["name"]["namespace"] == "_types.query_dsl":
+                # handle specific DSL classes explicitly to map to existing
+                # Python DSL classes
+                if schema_type["name"]["name"].endswith("RangeQuery"):
+                    return '"wrappers.Range[Any]"', None
+                elif schema_type["name"]["name"].endswith("ScoreFunction"):
+                    name = schema_type["name"]["name"].removesuffix("Function")
+                    return f'"function.{name}"', None
+                elif schema_type["name"]["name"].endswith("DecayFunction"):
+                    return '"function.DecayFunction"', None
+                elif schema_type["name"]["name"].endswith("Function"):
+                    return f"\"function.{schema_type['name']['name']}\"", None
+            elif schema_type["name"]["namespace"] == "_types.analysis" and schema_type[
+                "name"
+            ]["name"].endswith("Analyzer"):
+                # not expanding analyzers at this time, maybe in the future
+                return "str, Dict[str, Any]", None
+            elif schema_type["name"]["namespace"] == "_types.aggregations":
+                if (
+                    schema_type["name"]["name"].endswith("AggregationRange")
+                    or schema_type["name"]["name"] == "DateRangeExpression"
+                ) and schema_type["name"]["name"] != "IpRangeAggregationRange":
+                    return '"wrappers.AggregationRange"', None
+
+            # to handle other interfaces we generate a type of the same name
+            # and add the interface to the interfaces.py module
+            if schema_type["name"]["name"] not in self.interfaces:
+                self.interfaces.append(schema_type["name"]["name"])
+                if for_response:
+                    self.response_interfaces.append(schema_type["name"]["name"])
+            return f"\"types.{schema_type['name']['name']}\"", None
+        elif schema_type["kind"] == "user_defined_value":
+            # user_defined_value maps to Python's Any type
+            return "Any", None
+
+        raise RuntimeError(f"Cannot find Python type for {schema_type}")
+
+    def add_attribute(self, k, arg, for_types_py=False, for_response=False):
+        """Add an attribute to the internal representation of a class.
+
+        This method adds the argument `arg` to the data structure for a class
+        stored in `k`. In particular, the argument is added to the `k["args"]`
+        list, making sure required arguments are first in the list. If the
+        argument is of a type that needs Python DSL specific typing details to
+        be stored in the DslBase._param_defs attribute, then this is added to
+        `k["params"]`.
+
+        When `for_types_py` is `True`, type hints are formatted in the most
+        convenient way for the types.py file. When possible, double quotes are
+        removed from types, and for types that are in the same file the quotes
+        are kept to prevent forward references, but the "types." namespace is
+        removed. When `for_types_py` is `False`, all non-native types use
+        quotes and are namespaced.
+
+        When `for_response` is `True`, type hints are not given the optional
+        dictionary representation, nor the `DefaultType` used for omitted
+        attributes.
+        """
+        try:
+            type_, param = self.get_python_type(arg["type"], for_response=for_response)
+        except RuntimeError:
+            type_ = "Any"
+            param = None
+        if not for_response:
+            if type_ != "Any":
+                if (
+                    'Sequence["types.' in type_
+                    or 'Sequence["wrappers.AggregationRange' in type_
+                ):
+                    type_ = add_seq_dict_type(type_)  # interfaces can be given as dicts
+                elif "types." in type_ or "wrappers.AggregationRange" in type_:
+                    type_ = add_dict_type(type_)  # interfaces can be given as dicts
+                type_ = add_not_set(type_)
+        if for_types_py:
+            type_ = type_for_types_py(type_)
+        required = "(required) " if arg["required"] else ""
+        server_default = (
+            f" Defaults to `{arg['serverDefault']}` if omitted."
+            if arg.get("serverDefault")
+            else ""
+        )
+        doc = wrapped_doc(
+            f":arg {arg['name']}: {required}{arg.get('description', '')}{server_default}",
+            subsequent_indent="    ",
+        )
+        arg = {
+            "name": PROP_REPLACEMENTS.get(arg["name"], arg["name"]),
+            "type": type_,
+            "doc": doc,
+            "required": arg["required"],
+        }
+        if param is not None:
+            param = {"name": arg["name"], "param": param}
+        if arg["required"]:
+            # insert in the right place so that all required arguments
+            # appear at the top of the argument list
+            i = 0
+            for i in range(len(k["args"]) + 1):
+                if i == len(k["args"]):
+                    break
+                if k["args"][i].get("positional"):
+                    continue
+                if k["args"][i]["required"] is False:
+                    break
+            k["args"].insert(i, arg)
+        else:
+            k["args"].append(arg)
+        if param and "params" in k:
+            k["params"].append(param)
+
+    def add_behaviors(self, type_, k, for_types_py=False, for_response=False):
+        """Add behaviors reported in the specification of the given type to the
+        class representation.
+        """
+        if "behaviors" in type_:
+            for behavior in type_["behaviors"]:
+                if (
+                    behavior["type"]["name"] != "AdditionalProperty"
+                    or behavior["type"]["namespace"] != "_spec_utils"
+                ):
+                    # we do not support this behavior, so we ignore it
+                    continue
+                key_type, _ = self.get_python_type(
+                    behavior["generics"][0], for_response=for_response
+                )
+                if "InstrumentedField" in key_type:
+                    value_type, _ = self.get_python_type(
+                        behavior["generics"][1], for_response=for_response
+                    )
+                    if for_types_py:
+                        value_type = value_type.replace('"DefaultType"', "DefaultType")
+                        value_type = value_type.replace(
+                            '"InstrumentedField"', "InstrumentedField"
+                        )
+                        value_type = re.sub(
+                            r'"(function\.[a-zA-Z0-9_]+)"', r"\1", value_type
+                        )
+                        value_type = re.sub(
+                            r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', value_type
+                        )
+                        value_type = re.sub(
+                            r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", value_type
+                        )
+                    k["args"].append(
+                        {
+                            "name": "_field",
+                            "type": add_not_set(key_type),
+                            "doc": [":arg _field: The field to use in this query."],
+                            "required": False,
+                            "positional": True,
+                        }
+                    )
+                    k["args"].append(
+                        {
+                            "name": "_value",
+                            "type": add_not_set(add_dict_type(value_type)),
+                            "doc": [":arg _value: The query value for the field."],
+                            "required": False,
+                            "positional": True,
+                        }
+                    )
+                    k["is_single_field"] = True
+                else:
+                    raise RuntimeError(
+                        f"Non-field AdditionalProperty are not supported for interface {type_['name']['namespace']}:{type_['name']['name']}."
+                    )
+
+    def property_to_python_class(self, p):
+        """Return a dictionary with template data necessary to render a schema
+        property as a Python class.
+
+        Used for "container" sub-classes such as `QueryContainer`, where each
+        sub-class is represented by a Python DSL class.
+
+        The format is as follows:
+
+        ```python
+        {
+            "property_name": "the name of the property",
+            "name": "the class name to use for the property",
+            "docstring": "the formatted docstring as a list of strings",
+            "args": [  # a Python description of each class attribute
+                "name": "the name of the attribute",
+                "type": "the Python type hint for the attribute",
+                "doc": ["formatted lines of documentation to add to class docstring"],
+                "required": bool,
+                "positional": bool,
+            ],
+            "params": [
+                "name": "the attribute name",
+                "param": "the param dictionary to include in `_param_defs` for the class",
+            ],  # a DSL-specific description of interesting attributes
+            "is_single_field": bool  # True for single-key dicts with field key
+            "is_multi_field": bool  # True for multi-key dicts with field keys
+        }
+        ```
+        """
+        k = {
+            "property_name": p["name"],
+            "name": property_to_class_name(p["name"]),
+        }
+        k["docstring"] = wrapped_doc(p.get("description") or "")
+        other_classes = []
+        kind = p["type"]["kind"]
+        if kind == "instance_of":
+            namespace = p["type"]["type"]["namespace"]
+            name = p["type"]["type"]["name"]
+            if f"{namespace}:{name}" in TYPE_REPLACEMENTS:
+                namespace, name = TYPE_REPLACEMENTS[f"{namespace}:{name}"].split(":")
+            if name == "QueryContainer" and namespace == "_types.query_dsl":
+                type_ = {
+                    "kind": "interface",
+                    "properties": [p],
+                }
+            else:
+                type_ = self.find_type(name, namespace)
+            if p["name"] in AGG_TYPES:
+                k["parent"] = AGG_TYPES[p["name"]]
+
+            if type_["kind"] == "interface":
+                # set the correct parent for bucket and pipeline aggregations
+                if self.inherits_from(
+                    type_, "PipelineAggregationBase", "_types.aggregations"
+                ):
+                    k["parent"] = "Pipeline"
+                elif self.inherits_from(
+                    type_, "BucketAggregationBase", "_types.aggregations"
+                ):
+                    k["parent"] = "Bucket"
+
+                # generate class attributes
+                k["args"] = []
+                k["params"] = []
+                self.add_behaviors(type_, k)
+                while True:
+                    for arg in type_["properties"]:
+                        self.add_attribute(k, arg)
+                    if "inherits" in type_ and "type" in type_["inherits"]:
+                        type_ = self.find_type(
+                            type_["inherits"]["type"]["name"],
+                            type_["inherits"]["type"]["namespace"],
+                        )
+                    else:
+                        break
+
+            elif type_["kind"] == "type_alias":
+                if type_["type"]["kind"] == "union_of":
+                    # for unions we create sub-classes
+                    for other in type_["type"]["items"]:
+                        other_class = self.interface_to_python_class(
+                            other["type"]["name"],
+                            other["type"]["namespace"],
+                            for_types_py=False,
+                        )
+                        other_class["parent"] = k["name"]
+                        other_classes.append(other_class)
+                else:
+                    raise RuntimeError(
+                        "Cannot generate code for instances of type_alias instances that are not unions."
+                    )
+
+            else:
+                raise RuntimeError(
+                    f"Cannot generate code for instances of kind '{type_['kind']}'"
+                )
+
+        elif kind == "dictionary_of":
+            key_type, _ = self.get_python_type(p["type"]["key"])
+            if "InstrumentedField" in key_type:
+                value_type, _ = self.get_python_type(p["type"]["value"])
+                if p["type"]["singleKey"]:
+                    # special handling for single-key dicts with field key
+                    k["args"] = [
+                        {
+                            "name": "_field",
+                            "type": add_not_set(key_type),
+                            "doc": [":arg _field: The field to use in this query."],
+                            "required": False,
+                            "positional": True,
+                        },
+                        {
+                            "name": "_value",
+                            "type": add_not_set(add_dict_type(value_type)),
+                            "doc": [":arg _value: The query value for the field."],
+                            "required": False,
+                            "positional": True,
+                        },
+                    ]
+                    k["is_single_field"] = True
+                else:
+                    # special handling for multi-key dicts with field keys
+                    k["args"] = [
+                        {
+                            "name": "_fields",
+                            "type": f"Optional[Mapping[{key_type}, {value_type}]]",
+                            "doc": [
+                                ":arg _fields: A dictionary of fields with their values."
+                            ],
+                            "required": False,
+                            "positional": True,
+                        },
+                    ]
+                    k["is_multi_field"] = True
+            else:
+                raise RuntimeError(f"Cannot generate code for type {p['type']}")
+
+        else:
+            raise RuntimeError(f"Cannot generate code for type {p['type']}")
+        return [k] + other_classes
+
+    def interface_to_python_class(
+        self,
+        interface,
+        namespace=None,
+        *,
+        for_types_py=True,
+        for_response=False,
+    ):
+        """Return a dictionary with template data necessary to render an
+        interface as a Python class.
+
+        This is used to render interfaces that are referenced by container
+        classes. Interfaces that are discovered while rendering are added to
+        `self.interfaces` so that they can be rendered as well.
+
+        The returned format is as follows:
+
+        ```python
+        {
+            "name": "the class name to use for the interface class",
+            "parent": "the parent class name",
+            "args": [ # a Python description of each class attribute
+                "name": "the name of the attribute",
+                "type": "the Python type hint for the attribute",
+                "doc": ["formatted lines of documentation to add to class docstring"],
+                "required": bool,
+                "positional": bool,
+            ],
+            "buckets_as_dict": "type" # optional, only present in aggregation response
+                                      # classes that have buckets that can have a list
+                                      # or dict representation
+        }
+        ```
+        """
+        type_ = self.find_type(interface, namespace)
+        if type_["kind"] not in ["interface", "response"]:
+            raise RuntimeError(f"Type {interface} is not an interface")
+        if type_["kind"] == "response":
+            # we consider responses as interfaces because they also have properties
+            # but the location of the properties is different
+            type_ = type_["body"]
+        k = {"name": interface, "for_response": for_response, "args": []}
+        k["docstring"] = wrapped_doc(type_.get("description") or "")
+        self.add_behaviors(
+            type_, k, for_types_py=for_types_py, for_response=for_response
+        )
+        generics = []
+        while True:
+            for arg in type_["properties"]:
+                if interface == "ResponseBody" and arg["name"] == "hits":
+                    k["args"].append(
+                        {
+                            "name": "hits",
+                            "type": "Sequence[_R]",
+                            "doc": [":arg hits: search results"],
+                            "required": arg["required"],
+                        }
+                    )
+                elif interface == "ResponseBody" and arg["name"] == "aggregations":
+                    # Aggregations are tricky because the DSL client uses a
+                    # flexible representation that is difficult to generate
+                    # from the schema.
+                    # To handle this we let the generator do its work by calling
+                    # `add_attribute()`, but then we save the generated attribute
+                    # apart and replace it with the DSL's `AggResponse` class.
+                    # The generated type is then used in type hints in variables
+                    # and methods of this class.
+                    self.add_attribute(
+                        k, arg, for_types_py=for_types_py, for_response=for_response
+                    )
+                    k["aggregate_type"] = (
+                        k["args"][-1]["type"]
+                        .split("Mapping[str, ")[1]
+                        .rsplit("]", 1)[0]
+                    )
+                    k["args"][-1] = {
+                        "name": "aggregations",
+                        "type": '"AggResponse[_R]"',
+                        "doc": [":arg aggregations: aggregation results"],
+                        "required": arg["required"],
+                    }
+                elif (
+                    "name" in type_
+                    and type_["name"]["name"] == "MultiBucketAggregateBase"
+                    and arg["name"] == "buckets"
+                ):
+                    # Also during aggregation response generation, the "buckets"
+                    # attribute that many aggregation responses have is very
+                    # complex, supporting over a dozen different aggregation
+                    # types via generics, each in array or object configurations.
+                    # Typing this attribute proved very difficult. A solution
+                    # that worked with mypy and pyright is to type "buckets"
+                    # for the list form, and create a `buckets_as_dict`
+                    # property that is typed appropriately for accessing the
+                    # buckets in dictionary form.
+                    # The generic type is assumed to be the first in the list,
+                    # which is a simplification that should be improved when a
+                    # more complete implementation of generics is added.
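+                    # (Sketch: a terms aggregation response, for instance, ends
+                    # up with `buckets: Sequence[<bucket class>]` plus a
+                    # `buckets_as_dict` entry of the same bucket type that the
+                    # template turns into the dict-form property.)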
+                    if generics[0]["type"]["name"] == "Void":
+                        generic_type = "Any"
+                    else:
+                        _g = self.find_type(
+                            generics[0]["type"]["name"],
+                            generics[0]["type"]["namespace"],
+                        )
+                        generic_type, _ = self.get_python_type(
+                            _g, for_response=for_response
+                        )
+                        generic_type = type_for_types_py(generic_type)
+                    k["args"].append(
+                        {
+                            "name": arg["name"],
+                            # for the type we only include the array form, since
+                            # this client does not request the dict form
+                            "type": f"Sequence[{generic_type}]",
+                            "doc": [
+                                ":arg buckets: (required) the aggregation buckets as a list"
+                            ],
+                            "required": True,
+                        }
+                    )
+                    k["buckets_as_dict"] = generic_type
+                elif namespace == "_types.mapping":
+                    if arg["name"] in ["fields", "properties"]:
+                        # Python DSL provides a high-level representation for the
+                        # "fields" and "properties" properties that many types support
+                        k["args"].append(
+                            {
+                                "name": arg["name"],
+                                "type": 'Union[Mapping[str, Field], "DefaultType"]',
+                                "doc": [f":arg {arg['name']}:"],
+                                "required": False,
+                            }
+                        )
+                        if "params" not in k:
+                            k["params"] = []
+                        k["params"].append(
+                            {
+                                "name": arg["name"],
+                                "param": {"type": "field", "hash": True},
+                            }
+                        )
+
+                    else:
+                        # The Python DSL also provides implementations of analyzers
+                        # and normalizers, so here we make sure these are noted as
+                        # params and have an appropriate type hint.
+                        self.add_attribute(
+                            k, arg, for_types_py=for_types_py, for_response=for_response
+                        )
+                        if arg["name"].endswith("analyzer"):
+                            if "params" not in k:
+                                k["params"] = []
+                            k["params"].append(
+                                {"name": arg["name"], "param": {"type": "analyzer"}}
+                            )
+                            k["args"][-1]["type"] = 'Union[str, DslBase, "DefaultType"]'
+                        elif arg["name"].endswith("normalizer"):
+                            if "params" not in k:
+                                k["params"] = []
+                            k["params"].append(
+                                {"name": arg["name"], "param": {"type": "normalizer"}}
+                            )
+                            k["args"][-1]["type"] = 'Union[str, DslBase, "DefaultType"]'
+                else:
+                    if interface == "Hit" and arg["name"].startswith("_"):
+                        # Python DSL removes the underscore prefix from all the
+                        # properties of the hit, so we do the same
+                        arg["name"] = arg["name"][1:]
+
+                    self.add_attribute(
+                        k, arg, for_types_py=for_types_py, for_response=for_response
+                    )
+
+            if "inherits" not in type_ or "type" not in type_["inherits"]:
+                break
+
+            if "generics" in type_["inherits"]:
+                # Generics are only supported for certain specific cases at this
+                # time. Here we just save them so that they can be recalled later
+                # while traversing parent classes to find inherited attributes.
+                for generic_type in type_["inherits"]["generics"]:
+                    generics.append(generic_type)
+
+            type_ = self.find_type(
+                type_["inherits"]["type"]["name"],
+                type_["inherits"]["type"]["namespace"],
+            )
+        return k
+
+
+def generate_field_py(schema, filename):
+    """Generate field.py with all the Elasticsearch fields as Python classes."""
+    float_fields = ["half_float", "scaled_float", "double", "rank_feature"]
+    integer_fields = ["byte", "short", "long"]
+    range_fields = [
+        "integer_range",
+        "float_range",
+        "long_range",
+        "double_range",
+        "date_range",
+    ]
+    object_fields = ["nested"]
+    coerced_fields = [
+        "boolean",
+        "date",
+        "float",
+        "object",
+        "dense_vector",
+        "integer",
+        "ip",
+        "binary",
+        "percolator",
+    ]
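+    # Sketch of the classification above: "half_float" becomes a subclass of
+    # Float, "integer_range" a subclass of RangeField, "nested" a subclass of
+    # Object, and anything not listed falls back to Field as its parent.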
+
+    classes = []
+    property = schema.find_type("Property", "_types.mapping")
+    for type_ in property["type"]["items"]:
+        if type_["type"]["name"] == "DynamicProperty":
+            # no support for dynamic properties
+            continue
+        field = schema.find_type(type_["type"]["name"], type_["type"]["namespace"])
+        name = class_name = ""
+        for prop in field["properties"]:
+            if prop["name"] == "type":
+                if prop["type"]["kind"] != "literal_value":
+                    raise RuntimeError(f"Unexpected property type {prop}")
+                name = prop["type"]["value"]
+                class_name = "".join([n.title() for n in name.split("_")])
+        k = schema.interface_to_python_class(
+            type_["type"]["name"],
+            type_["type"]["namespace"],
+            for_types_py=False,
+            for_response=False,
+        )
+        k["name"] = class_name
+        k["field"] = name
+        k["coerced"] = name in coerced_fields
+        if name in float_fields:
+            k["parent"] = "Float"
+        elif name in integer_fields:
+            k["parent"] = "Integer"
+        elif name in range_fields:
+            k["parent"] = "RangeField"
+        elif name in object_fields:
+            k["parent"] = "Object"
+        else:
+            k["parent"] = "Field"
+        k["args"] = [prop for prop in k["args"] if prop["name"] != "type"]
+        if name == "object":
+            # the DSL's object field has a doc_class argument
+            k["args"] = [
+                {
+                    "name": "doc_class",
+                    "type": 'Union[Type["InnerDoc"], "DefaultType"]',
+                    "doc": [
+                        ":arg doc_class: base doc class that handles mapping.",
+                        "   If no `doc_class` is provided, new instance of `InnerDoc` will be created,",
+                        "   populated with `properties` and used. Can not be provided together with `properties`",
+                    ],
+                    "positional": True,
+                    "required": False,
+                }
+            ] + k["args"]
+        elif name == "date":
+            k["args"] = [
+                {
+                    "name": "default_timezone",
+                    "type": 'Union[str, "tzinfo", "DefaultType"]',
+                    "doc": [
+                        ":arg default_timezone: timezone that will be automatically used for tz-naive values",
+                        "   May be instance of `datetime.tzinfo` or string containing TZ offset",
+                    ],
+                    "positional": True,
+                    "required": False,
+                }
+            ] + k["args"]
+        classes.append(k)
+    # make sure parent classes appear first
+    classes = sorted(
+        classes,
+        key=lambda k: (
+            f'AA{k["name"]}'
+            if k["name"] in ["Float", "Integer", "Object"]
+            else k["name"]
+        ),
+    )
+
+    with open(filename, "w") as f:
+        f.write(field_py.render(classes=classes))
+    print(f"Generated {filename}.")
+
+
+def generate_query_py(schema, filename):
+    """Generate query.py with all the properties of `QueryContainer` as Python
+    classes.
+    """
+    classes = []
+    query_container = schema.find_type("QueryContainer", "_types.query_dsl")
+    for p in query_container["properties"]:
+        classes += schema.property_to_python_class(p)
+
+    with open(filename, "w") as f:
+        f.write(query_py.render(classes=classes, parent="Query"))
+    print(f"Generated {filename}.")
+
+
+def generate_aggs_py(schema, filename):
+    """Generate aggs.py with all the properties of `AggregationContainer` as
+    Python classes.
+    """
+    classes = []
+    aggs_container = schema.find_type("AggregationContainer", "_types.aggregations")
+    for p in aggs_container["properties"]:
+        if "containerProperty" not in p or not p["containerProperty"]:
+            classes += schema.property_to_python_class(p)
+
+    with open(filename, "w") as f:
+        f.write(aggs_py.render(classes=classes, parent="Agg"))
+    print(f"Generated {filename}.")
+
+
+def generate_response_init_py(schema, filename):
+    """Generate response/__init__.py with all the response properties
+    documented and typed.
+    """
+    search_response = schema.interface_to_python_class(
+        "ResponseBody",
+        "_global.search",
+        for_types_py=False,
+        for_response=True,
+    )
+    ubq_response = schema.interface_to_python_class(
+        "Response",
+        "_global.update_by_query",
+        for_types_py=False,
+        for_response=True,
+    )
+    with open(filename, "w") as f:
+        f.write(
+            response_init_py.render(response=search_response, ubq_response=ubq_response)
+        )
+    print(f"Generated {filename}.")
+
+
+def generate_types_py(schema, filename):
+    """Generate types.py"""
+    classes = {}
+    for interface in schema.interfaces:
+        if interface == "PipeSeparatedFlags":
+            continue  # handled as a special case
+        for_response = interface in schema.response_interfaces
+        k = schema.interface_to_python_class(
+            interface, for_types_py=True, for_response=for_response
+        )
+        classes[k["name"]] = k
+
+    # sort classes by being request/response and then by name
+    sorted_classes = sorted(
+        list(classes.keys()),
+        key=lambda i: str(int(i in schema.response_interfaces)) + i,
+    )
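+    # e.g. a request interface "Foo" sorts under "0Foo" and a response interface
+    # under "1Foo", so request classes come first, each group alphabetically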
+    classes_list = []
+    for n in sorted_classes:
+        k = classes[n]
+        if k in classes_list:
+            continue
+        classes_list.append(k)
+
+    with open(filename, "w") as f:
+        f.write(types_py.render(classes=classes_list))
+    print(f"Generated {filename}.")
+
+
+if __name__ == "__main__":
+    v = subprocess.check_output(["git", "branch", "--show-current"]).strip().decode()
+    schema = ElasticsearchSchema(v)
+    generate_field_py(schema, "elasticsearch/dsl/field.py")
+    generate_query_py(schema, "elasticsearch/dsl/query.py")
+    generate_aggs_py(schema, "elasticsearch/dsl/aggs.py")
+    generate_response_init_py(schema, "elasticsearch/dsl/response/__init__.py")
+    generate_types_py(schema, "elasticsearch/dsl/types.py")
diff -pruN 8.17.2-2/utils/generate-examples.py 9.2.0-1/utils/generate-examples.py
--- 8.17.2-2/utils/generate-examples.py	2025-03-19 06:35:35.000000000 +0000
+++ 9.2.0-1/utils/generate-examples.py	2025-10-28 16:50:53.000000000 +0000
@@ -198,7 +198,7 @@ ParsedSource = collections.namedtuple("P
 def blacken(filename):
     runner = CliRunner()
     result = runner.invoke(
-        black.main, [str(filename), "--line-length=75", "--target-version=py37"]
+        black.main, [str(filename), "--line-length=75", "--target-version=py310"]
     )
     assert result.exit_code == 0, result.output
 
diff -pruN 8.17.2-2/utils/run-unasync-dsl.py 9.2.0-1/utils/run-unasync-dsl.py
--- 8.17.2-2/utils/run-unasync-dsl.py	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/run-unasync-dsl.py	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,154 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import os
+import subprocess
+import sys
+from glob import glob
+from pathlib import Path
+
+import unasync
+
+
+def main(check=False):
+    # the list of directories that need to be processed with unasync
+    # each entry has two paths:
+    #   - the source path with the async sources
+    #   - the destination path where the sync sources should be written
+    source_dirs = [
+        (
+            "elasticsearch/dsl/_async/",
+            "elasticsearch/dsl/_sync/",
+        ),
+        ("test_elasticsearch/test_dsl/_async/", "test_elasticsearch/test_dsl/_sync/"),
+        (
+            "test_elasticsearch/test_dsl/test_integration/_async/",
+            "test_elasticsearch/test_dsl/test_integration/_sync/",
+        ),
+        (
+            "test_elasticsearch/test_dsl/test_integration/test_examples/_async/",
+            "test_elasticsearch/test_dsl/test_integration/test_examples/_sync/",
+        ),
+        ("examples/dsl/async/", "examples/dsl/"),
+    ]
+
+    # Unasync all the generated async code
+    additional_replacements = {
+        "_async": "_sync",
+        "AsyncElasticsearch": "Elasticsearch",
+        "AsyncSearch": "Search",
+        "AsyncMultiSearch": "MultiSearch",
+        "AsyncEmptySearch": "EmptySearch",
+        "AsyncDocument": "Document",
+        "AsyncIndexMeta": "IndexMeta",
+        "AsyncIndexTemplate": "IndexTemplate",
+        "AsyncIndex": "Index",
+        "AsyncComposableIndexTemplate": "ComposableIndexTemplate",
+        "AsyncUpdateByQuery": "UpdateByQuery",
+        "AsyncMapping": "Mapping",
+        "AsyncFacetedSearch": "FacetedSearch",
+        "AsyncUsingType": "UsingType",
+        "AsyncBaseESModel": "BaseESModel",
+        "async_connections": "connections",
+        "async_scan": "scan",
+        "async_simulate": "simulate",
+        "async_bulk": "bulk",
+        "async_mock_client": "mock_client",
+        "async_client": "client",
+        "async_data_client": "data_client",
+        "async_write_client": "write_client",
+        "async_pull_request": "pull_request",
+        "async_examples": "examples",
+        "async_sleep": "sleep",
+        "assert_awaited_once_with": "assert_called_once_with",
+        "asynccontextmanager": "contextmanager",
+    }
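+    # Sketch of the effect: a line such as
+    #     result = await AsyncSearch(using=async_client).execute()
+    # in an _async source file is rewritten by unasync plus these replacements to
+    #     result = Search(using=client).execute()
+    # in the corresponding _sync file.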
+    rules = [
+        unasync.Rule(
+            fromdir=dir[0],
+            todir=f"{dir[0]}_sync_check/" if check else dir[1],
+            additional_replacements=additional_replacements,
+        )
+        for dir in source_dirs
+    ]
+
+    filepaths = []
+    for root, _, filenames in os.walk(Path(__file__).absolute().parent.parent):
+        if "/site-packages" in root or "/." in root or "__pycache__" in root:
+            continue
+        for filename in filenames:
+            if filename.rpartition(".")[-1] in (
+                "py",
+                "pyi",
+            ) and not filename.startswith("utils.py"):
+                filepaths.append(os.path.join(root, filename))
+
+    unasync.unasync_files(filepaths, rules)
+    output_dirs = []
+    for dir in source_dirs:
+        output_dirs.append(f"{dir[0]}_sync_check/" if check else dir[1])
+    subprocess.check_call(["black", "--target-version=py310", *output_dirs])
+    subprocess.check_call(["isort", *output_dirs])
+    for dir, output_dir in zip(source_dirs, output_dirs):
+        for file in glob("*.py", root_dir=dir[0]):
+            # remove asyncio from sync files
+            subprocess.check_call(
+                ["sed", "-i.bak", "/^import asyncio$/d", f"{output_dir}{file}"]
+            )
+            subprocess.check_call(
+                [
+                    "sed",
+                    "-i.bak",
+                    "s/asyncio\\.run(main())/main()/",
+                    f"{output_dir}{file}",
+                ]
+            )
+            subprocess.check_call(
+                [
+                    "sed",
+                    "-i.bak",
+                    's/"elasticsearch\\[async\\]"/elasticsearch/',
+                    f"{output_dir}{file}",
+                ]
+            )
+            for library in ["asyncio", "trio", "anyio"]:
+                subprocess.check_call(
+                    [
+                        "sed",
+                        "-i.bak",
+                        f"s/pytest.mark.{library}/pytest.mark.sync/",
+                        f"{output_dir}{file}",
+                    ]
+                )
+                subprocess.check_call(["rm", f"{output_dir}{file}.bak"])
+
+            if check:
+                # make sure there are no differences between _sync and _sync_check
+                subprocess.check_call(
+                    [
+                        "diff",
+                        f"{dir[1]}{file}",
+                        f"{output_dir}{file}",
+                    ]
+                )
+
+        if check:
+            subprocess.check_call(["rm", "-rf", output_dir])
+
+
+if __name__ == "__main__":
+    main(check="--check" in sys.argv)
diff -pruN 8.17.2-2/utils/templates/aggs.py.tpl 9.2.0-1/utils/templates/aggs.py.tpl
--- 8.17.2-2/utils/templates/aggs.py.tpl	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/templates/aggs.py.tpl	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,321 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from copy import deepcopy
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Dict,
+    Generic,
+    Iterable,
+    Literal,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Sequence,
+    Union,
+    cast,
+)
+
+from elastic_transport.client_utils import DEFAULT
+
+from .query import Query
+from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData
+from .utils import _R, AttrDict, DslBase
+from . import wrappers
+
+if TYPE_CHECKING:
+    from elastic_transport.client_utils import DefaultType
+    from . import types
+    from .document_base import InstrumentedField
+    from .search_base import SearchBase
+
+
+def A(
+    name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str],
+    filter: Optional[Union[str, "Query"]] = None,
+    **params: Any,
+) -> "Agg[_R]":
+    if filter is not None:
+        if name_or_agg != "filter":
+            raise ValueError(
+                "Aggregation %r doesn't accept positional argument 'filter'."
+                % name_or_agg
+            )
+        params["filter"] = filter
+
+    # {"terms": {"field": "tags"}, "aggs": {...}}
+    if isinstance(name_or_agg, collections.abc.MutableMapping):
+        if params:
+            raise ValueError("A() cannot accept parameters when passing in a dict.")
+        # copy to avoid modifying in-place
+        agg = deepcopy(name_or_agg)
+        # pop out nested aggs
+        aggs = agg.pop("aggs", None)
+        # pop out meta data
+        meta = agg.pop("meta", None)
+        # should be {"terms": {"field": "tags"}}
+        if len(agg) != 1:
+            raise ValueError(
+                'A() can only accept dict with an aggregation ({"terms": {...}}). '
+                "Instead it got (%r)" % name_or_agg
+            )
+        agg_type, params = agg.popitem()
+        if aggs:
+            params = params.copy()
+            params["aggs"] = aggs
+        if meta:
+            params = params.copy()
+            params["meta"] = meta
+        return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params)
+
+    # Terms(...) just return the nested agg
+    elif isinstance(name_or_agg, Agg):
+        if params:
+            raise ValueError(
+                "A() cannot accept parameters when passing in an Agg object."
+            )
+        return name_or_agg
+
+    # "terms", field="tags"
+    return Agg[_R].get_dsl_class(name_or_agg)(**params)
+
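+# Illustrative usage (sketch): the calls below build equivalent aggregations:
+#   A("terms", field="tags")
+#   A({"terms": {"field": "tags"}})
+#   A(A("terms", field="tags"))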
+
+class Agg(DslBase, Generic[_R]):
+    _type_name = "agg"
+    _type_shortcut = staticmethod(A)
+    name = ""
+
+    def __contains__(self, key: str) -> bool:
+        return False
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        if isinstance(d[self.name], dict):
+            n = cast(Dict[str, Any], d[self.name])
+            if "meta" in n:
+                d["meta"] = n.pop("meta")
+        return d
+
+    def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]:
+        return AggResponse[_R](self, search, data)
+
+
+class AggBase(Generic[_R]):
+    aggs: Dict[str, Agg[_R]]
+    _base: Agg[_R]
+    _params: Dict[str, Any]
+    _param_defs: ClassVar[Dict[str, Any]] = {
+        "aggs": {"type": "agg", "hash": True},
+    }
+
+    def __contains__(self, key: str) -> bool:
+        return key in self._params.get("aggs", {})
+
+    def __getitem__(self, agg_name: str) -> Agg[_R]:
+        agg = cast(
+            Agg[_R], self._params.setdefault("aggs", {})[agg_name]
+        )  # propagate KeyError
+
+        # make sure we're not mutating a shared state - whenever accessing a
+        # bucket, return a shallow copy of it to be safe
+        if isinstance(agg, Bucket):
+            agg = A(agg.name, **agg._params)
+            # be sure to store the copy so any modifications to it will affect us
+            self._params["aggs"][agg_name] = agg
+
+        return agg
+
+    def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None:
+        self.aggs[agg_name] = A(agg)
+
+    def __iter__(self) -> Iterable[str]:
+        return iter(self.aggs)
+
+    def _agg(
+        self,
+        bucket: bool,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> Agg[_R]:
+        agg = self[name] = A(agg_type, *args, **params)
+
+        # For chaining - when creating new buckets return them...
+        if bucket:
+            return agg
+        # otherwise return self._base so we can keep chaining
+        else:
+            return self._base
+
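+    # Chaining sketch: bucket() returns the newly created bucket so nested
+    # aggregations can be added to it, while metric() and pipeline() return the
+    # base aggregation so further siblings can be chained at the same level.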
+    def metric(
+        self,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> Agg[_R]:
+        return self._agg(False, name, agg_type, *args, **params)
+
+    def bucket(
+        self,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> "Bucket[_R]":
+        return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params))
+
+    def pipeline(
+        self,
+        name: str,
+        agg_type: Union[Dict[str, Any], Agg[_R], str],
+        *args: Any,
+        **params: Any,
+    ) -> "Pipeline[_R]":
+        return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params))
+
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return BucketData(self, search, data)  # type: ignore[arg-type]
+
+
+class Bucket(AggBase[_R], Agg[_R]):
+    def __init__(self, **params: Any):
+        super().__init__(**params)
+        # remember self for chaining
+        self._base = self
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super(AggBase, self).to_dict()
+        if isinstance(d[self.name], dict):
+            n = cast(AttrDict[Any], d[self.name])
+            if "aggs" in n:
+                d["aggs"] = n.pop("aggs")
+        return d
+
+
+class Pipeline(Agg[_R]):
+    pass
+
+
+{% for k in classes %}
+class {{ k.name }}({{ k.parent if k.parent else parent }}[_R]):
+    """
+    {% for line in k.docstring %}
+    {{ line }}
+    {% endfor %}
+    {% if k.args %}
+        {% if k.docstring %}
+
+        {% endif %}
+        {% for kwarg in k.args %}
+            {% for line in kwarg.doc %}
+    {{ line }}
+            {% endfor %}
+        {% endfor %}
+    {% endif %}
+    """
+    {% if k.property_name %}
+    name = "{{ k.property_name }}"
+    {% endif %}
+    {% if k.params %}
+    _param_defs = {
+    {% for param in k.params %}
+        "{{ param.name }}": {{ param.param }},
+    {% endfor %}
+    {% if k.name == "Filter" or k.name == "Filters" or k.name == "Composite" %}
+        {# these aggregations also accept nested sub-aggregations #}
+        "aggs": {"type": "agg", "hash": True},
+    {% endif %}
+    }
+    {% endif %}
+
+    def __init__(
+        self,
+        {% if k.args | length != 1 %}
+            {% for arg in k.args %}
+                {% if arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+                {% endif %}
+        {% endfor %}
+            {% if k.args and not k.args[-1].positional %}
+        *,
+            {% endif %}
+            {% for arg in k.args %}
+                {% if not arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+                {% endif %}
+            {% endfor %}
+        {% else %}
+            {# when we have just one argument, we allow it as positional or keyword #}
+            {% for arg in k.args %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+            {% endfor %}
+        {% endif %}
+        **kwargs: Any
+    ):
+        {% if k.name == "FunctionScore" %}
+            {# continuation of the FunctionScore shortcut property support from above #}
+        if functions is DEFAULT:
+            functions = []
+            for name in ScoreFunction._classes:
+                if name in kwargs:
+                    functions.append({name: kwargs.pop(name)})  # type: ignore
+        {% elif k.is_single_field %}
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        {% elif k.is_multi_field %}
+        if _fields is not DEFAULT:
+            for field, value in _fields.items():
+                kwargs[str(field)] = value
+        {% endif %}
+        super().__init__(
+            {% for arg in k.args %}
+                {% if not arg.positional %}
+            {{ arg.name }}={{ arg.name }},
+                {% endif %}
+            {% endfor %}
+            **kwargs
+        )
+
+    {# what follows is a set of Pythonic enhancements to some of the aggregation
+       classes which are outside the scope of the code generator #}
+    {% if k.name == "Filter" %}
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        if isinstance(d[self.name], dict):
+            n = cast(AttrDict[Any], d[self.name])
+            n.update(n.pop("filter", {}))
+        return d
+
+    {% elif k.name == "Histogram" or k.name == "DateHistogram" or k.name == "AutoDateHistogram" or k.name == "VariableWidthHistogram" %}
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
+
+    {% elif k.name == "Terms" %}
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return FieldBucketData(self, search, data)
+
+    {% elif k.name == "TopHits" %}
+    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
+        return TopHitsData(self, search, data)
+
+    {% endif %}
+{% endfor %}
diff -pruN 8.17.2-2/utils/templates/field.py.tpl 9.2.0-1/utils/templates/field.py.tpl
--- 8.17.2-2/utils/templates/field.py.tpl	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/templates/field.py.tpl	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,481 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import base64
+import collections.abc
+import ipaddress
+from copy import deepcopy
+from datetime import date, datetime
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    Iterator,
+    Literal,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from dateutil import parser, tz
+from elastic_transport.client_utils import DEFAULT, DefaultType
+
+from .exceptions import ValidationException
+from .query import Q
+from .utils import AttrDict, AttrList, DslBase
+from .wrappers import Range
+
+if TYPE_CHECKING:
+    from datetime import tzinfo
+    from ipaddress import IPv4Address, IPv6Address
+
+    from _operator import _SupportsComparison
+
+    from .document import InnerDoc
+    from .document_base import InstrumentedField
+    from .mapping_base import MappingBase
+    from .query import Query
+    from . import types
+
+unicode = str
+
+
+def construct_field(
+    name_or_field: Union[
+        str,
+        "Field",
+        Dict[str, Any],
+    ],
+    **params: Any,
+) -> "Field":
+    # {"type": "text", "analyzer": "snowball"}
+    if isinstance(name_or_field, collections.abc.Mapping):
+        if params:
+            raise ValueError(
+                "construct_field() cannot accept parameters when passing in a dict."
+            )
+        params = deepcopy(name_or_field)
+        if "type" not in params:
+            # inner object can be implicitly defined
+            if "properties" in params:
+                name = "object"
+            else:
+                raise ValueError('construct_field() needs to have a "type" key.')
+        else:
+            name = params.pop("type")
+        return Field.get_dsl_class(name)(**params)
+
+    # Text()
+    if isinstance(name_or_field, Field):
+        if params:
+            raise ValueError(
+                "construct_field() cannot accept parameters "
+                "when passing in a construct_field object."
+            )
+        return name_or_field
+
+    # "text", analyzer="snowball"
+    return Field.get_dsl_class(name_or_field)(**params)
+
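+# Illustrative usage (sketch): the following are equivalent ways to build a field:
+#   construct_field("text", analyzer="snowball")
+#   construct_field({"type": "text", "analyzer": "snowball"})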
+
+class Field(DslBase):
+    _type_name = "field"
+    _type_shortcut = staticmethod(construct_field)
+    # all fields can be multifields
+    _param_defs = {"fields": {"type": "field", "hash": True}}
+    name = ""
+    _coerce = False
+
+    def __init__(
+        self, multi: bool = False, required: bool = False, *args: Any, **kwargs: Any
+    ):
+        """
+        :arg bool multi: specifies whether field can contain array of values
+        :arg bool required: specifies whether field is required
+        """
+        self._multi = multi
+        self._required = required
+        super().__init__(*args, **kwargs)
+
+    def __getitem__(self, subfield: str) -> "Field":
+        return cast(Field, self._params.get("fields", {})[subfield])
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Any:
+        return data
+
+    def _safe_serialize(self, data: Any, skip_empty: bool) -> Any:
+        try:
+            return self._serialize(data, skip_empty)
+        except TypeError:
+            # older method signature, without skip_empty
+            return self._serialize(data)  # type: ignore[call-arg]
+
+    def _deserialize(self, data: Any) -> Any:
+        return data
+
+    def _empty(self) -> Optional[Any]:
+        return None
+
+    def empty(self) -> Optional[Any]:
+        if self._multi:
+            return AttrList([])
+        return self._empty()
+
+    def serialize(self, data: Any, skip_empty: bool = True) -> Any:
+        if isinstance(data, (list, AttrList, tuple)):
+            return list(map(self._safe_serialize, cast(Iterable[Any], data), [skip_empty] * len(data)))
+        return self._safe_serialize(data, skip_empty)
+
+    def deserialize(self, data: Any) -> Any:
+        if isinstance(data, (list, AttrList, tuple)):
+            data = [
+                None if d is None else self._deserialize(d)
+                for d in cast(Iterable[Any], data)
+            ]
+            return data
+        if data is None:
+            return None
+        return self._deserialize(data)
+
+    def clean(self, data: Any) -> Any:
+        if data is not None:
+            data = self.deserialize(data)
+        if data in (None, [], {}) and self._required:
+            raise ValidationException("Value required for this field.")
+        return data
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = super().to_dict()
+        name, value = cast(Tuple[str, Dict[str, Any]], d.popitem())
+        value["type"] = name
+        return value
+
+
+class CustomField(Field):
+    name = "custom"
+    _coerce = True
+
+    def to_dict(self) -> Dict[str, Any]:
+        if isinstance(self.builtin_type, Field):
+            return self.builtin_type.to_dict()
+
+        d = super().to_dict()
+        d["type"] = self.builtin_type
+        return d
+
+
+class RangeField(Field):
+    _coerce = True
+    _core_field: Optional[Field] = None
+
+    def _deserialize(self, data: Any) -> Range["_SupportsComparison"]:
+        if isinstance(data, Range):
+            return data
+        data = {k: self._core_field.deserialize(v) for k, v in data.items()}  # type: ignore[union-attr]
+        return Range(data)
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]:
+        if data is None:
+            return None
+        if not isinstance(data, collections.abc.Mapping):
+            data = data.to_dict()
+        return {k: self._core_field.serialize(v) for k, v in data.items()}  # type: ignore[union-attr]
+
+
+{% for k in classes %}
+class {{ k.name }}({{ k.parent }}):
+    """
+    {% for line in k.docstring %}
+    {{ line }}
+    {% endfor %}
+    {% if k.args %}
+        {% if k.docstring %}
+
+        {% endif %}
+        {% for kwarg in k.args %}
+            {% for line in kwarg.doc %}
+    {{ line }}
+            {% endfor %}
+        {% endfor %}
+    {% endif %}
+    """
+    name = "{{ k.field }}"
+    {% if k.coerced %}
+    _coerce = True
+    {% endif %}
+    {% if k.name.endswith('Range') %}
+    _core_field = {{ k.name[:-5] }}()
+    {% endif %}
+    {% if k.params %}
+    _param_defs = {
+    {% for param in k.params %}
+        "{{ param.name }}": {{ param.param }},
+    {% endfor %}
+    }
+    {% endif %}
+
+    def __init__(
+        self,
+        {% for arg in k.args %}
+            {% if arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+            {% endif %}
+        {% endfor %}
+        *args: Any,
+        {% for arg in k.args %}
+            {% if not arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+            {% endif %}
+        {% endfor %}
+        **kwargs: Any
+    ):
+        {% for arg in k.args %}
+            {% if not arg.positional %}
+        if {{ arg.name }} is not DEFAULT:
+                {% if "InstrumentedField" in arg.type %}
+                    {% if "Sequence" in arg.type %}
+            if isinstance({{ arg.name }}, list):
+                kwargs["{{ arg.name }}"] = [str(field) for field in {{ arg.name }}]
+            else:
+                kwargs["{{ arg.name }}"] = str({{ arg.name }})
+                    {% else %}
+            kwargs["{{ arg.name }}"] = str({{ arg.name }})
+                    {% endif %}
+                {% else %}
+            kwargs["{{ arg.name }}"] = {{ arg.name }}
+                {% endif %}
+            {% endif %}
+        {% endfor %}
+        {% if k.field == 'object' %}
+
+        if doc_class is not DEFAULT and (properties is not DEFAULT or dynamic is not DEFAULT):
+            raise ValidationException(
+                "doc_class and properties/dynamic should not be provided together"
+            )
+        if doc_class is not DEFAULT:
+            self._doc_class: Type["InnerDoc"] = doc_class
+        else:
+            # FIXME import
+            from .document import InnerDoc
+
+            # no InnerDoc subclass, creating one instead...
+            self._doc_class = type("InnerDoc", (InnerDoc,), {})
+            for name, field in (properties if properties is not DEFAULT else {}).items():
+                self._doc_class._doc_type.mapping.field(name, field)
+            if "properties" in kwargs:
+                del kwargs["properties"]
+            if dynamic is not DEFAULT:
+                self._doc_class._doc_type.mapping.meta("dynamic", dynamic)
+
+        self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping)
+        super().__init__(**kwargs)
+
+    def __getitem__(self, name: str) -> Field:
+        return self._mapping[name]
+
+    def __contains__(self, name: str) -> bool:
+        return name in self._mapping
+
+    def _empty(self) -> "InnerDoc":
+        return self._wrap({})
+
+    def _wrap(self, data: Dict[str, Any]) -> "InnerDoc":
+        return self._doc_class.from_es(data, data_only=True)
+
+    def empty(self) -> Union["InnerDoc", AttrList[Any]]:
+        if self._multi:
+            return AttrList[Any]([], self._wrap)
+        return self._empty()
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = self._mapping.to_dict()
+        d.update(super().to_dict())
+        return d
+
+    def _collect_fields(self) -> Iterator[Field]:
+        return self._mapping.properties._collect_fields()
+
+    def _deserialize(self, data: Any) -> "InnerDoc":
+        # don't wrap already wrapped data
+        if isinstance(data, self._doc_class):
+            return data
+
+        if isinstance(data, AttrDict):
+            data = data._d_
+
+        return self._wrap(data)
+
+    def _serialize(
+        self, data: Optional[Union[Dict[str, Any], "InnerDoc"]], skip_empty: bool
+    ) -> Optional[Dict[str, Any]]:
+        if data is None:
+            return None
+
+        # somebody assigned raw dict to the field, we should tolerate that
+        if isinstance(data, collections.abc.Mapping):
+            return data
+
+        try:
+            return data.to_dict(skip_empty=skip_empty)
+        except TypeError:
+            # this would only happen if an AttrDict was given instead of an InnerDoc
+            return data.to_dict()
+
+    def clean(self, data: Any) -> Any:
+        data = super().clean(data)
+        if data is None:
+            return None
+        if isinstance(data, (list, AttrList)):
+            for d in cast(Iterator["InnerDoc"], data):
+                d.full_clean()
+        else:
+            data.full_clean()
+        return data
+
+    def update(self, other: Any, update_only: bool = False) -> None:
+        if not isinstance(other, Object):
+            # not an inner/nested object, no merge possible
+            return
+
+        self._mapping.update(other._mapping, update_only)
+        {% elif k.field == "nested" %}
+        kwargs.setdefault("multi", True)
+        super().__init__(*args, **kwargs)
+        {% elif k.field == "date" %}
+
+        if default_timezone is DEFAULT:
+            self._default_timezone = None
+        elif isinstance(default_timezone, str):
+            self._default_timezone = tz.gettz(default_timezone)
+        else:
+            self._default_timezone = default_timezone
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> Union[datetime, date]:
+        if isinstance(data, str):
+            try:
+                data = parser.parse(data)
+            except Exception as e:
+                raise ValidationException(
+                    f"Could not parse date from the value ({data!r})", e
+                )
+            # we treat the yyyy-MM-dd format as a special case
+            if hasattr(self, "format") and self.format == "yyyy-MM-dd":
+                data = data.date()
+
+        if isinstance(data, datetime):
+            if self._default_timezone and data.tzinfo is None:
+                data = data.replace(tzinfo=self._default_timezone)
+            return cast(datetime, data)
+        if isinstance(data, date):
+            return data
+        if isinstance(data, int):
+            # Divide by a float to preserve milliseconds on the datetime.
+            return datetime.utcfromtimestamp(data / 1000.0)
+
+        raise ValidationException(f"Could not parse date from the value ({data!r})")
+        {% elif k.field == "boolean" %}
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> bool:
+        if data == "false":
+            return False
+        return bool(data)
+
+    def clean(self, data: Any) -> Optional[bool]:
+        if data is not None:
+            data = self.deserialize(data)
+        if data is None and self._required:
+            raise ValidationException("Value required for this field.")
+        return data  # type: ignore[no-any-return]
+        {% elif k.field == "float" %}
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> float:
+        return float(data)
+        {% elif k.field == "dense_vector" %}
+        self._element_type = kwargs.get("element_type", "float")
+        if self._element_type in ["float", "byte"]:
+            kwargs["multi"] = True
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> Any:
+        if self._element_type == "float":
+            return float(data)
+        elif self._element_type == "byte":
+            return int(data)
+        return data
+        {% elif k.field == "scaled_float" %}
+        if 'scaling_factor' not in kwargs:
+            if len(args) > 0:
+                kwargs['scaling_factor'] = args[0]
+                args = args[1:]
+            else:
+                raise TypeError("missing required argument: 'scaling_factor'")
+        super().__init__(*args, **kwargs)
+        {% elif k.field == "integer" %}
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> int:
+        return int(data)
+        {% elif k.field == "ip" %}
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]:
+        # the ipaddress library for pypy only accepts unicode.
+        return ipaddress.ip_address(unicode(data))
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]:
+        if data is None:
+            return None
+        return str(data)
+        {% elif k.field == "binary" %}
+        super().__init__(*args, **kwargs)
+
+    def clean(self, data: str) -> str:
+        # Binary fields are opaque, so there's not much cleaning
+        # that can be done.
+        return data
+
+    def _deserialize(self, data: Any) -> bytes:
+        return base64.b64decode(data)
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]:
+        if data is None:
+            return None
+        return base64.b64encode(data).decode()
+        {% elif k.field == "percolator" %}
+        super().__init__(*args, **kwargs)
+
+    def _deserialize(self, data: Any) -> "Query":
+        return Q(data)  # type: ignore[no-any-return]
+
+    def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]:
+        if data is None:
+            return None
+        return data.to_dict()  # type: ignore[no-any-return]
+        {% else %}
+        super().__init__(*args, **kwargs)
+        {% endif %}
+
+
+{% endfor %}
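The loop above stamps out one class per field type, splicing the per-type __init__ branches shown earlier into each body. A hedged sketch of how a few of the generated classes behave, assuming the rendered module ships as elasticsearch.dsl in the 9.x client:

    # Sketch only: class names and import path assume the 9.x client layout.
    from elasticsearch.dsl import Boolean, Date, InnerDoc, Object, Text

    # Boolean._deserialize treats the string "false" specially.
    assert Boolean().deserialize("false") is False
    assert Boolean().deserialize(1) is True

    # Date._deserialize parses strings with dateutil and applies the optional
    # default_timezone to naive datetimes.
    stamp = Date(default_timezone="UTC").deserialize("2025-10-28T16:50:53")
    assert stamp.tzinfo is not None

    # Object accepts an InnerDoc subclass or an inline properties dict; the
    # branch above builds an ad-hoc InnerDoc when only properties are given.
    class Comment(InnerDoc):
        author = Text()

    explicit = Object(Comment)
    adhoc = Object(properties={"author": Text()})
    assert "author" in explicit.to_dict()["properties"]
    assert "author" in adhoc.to_dict()["properties"]
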
diff -pruN 8.17.2-2/utils/templates/query.py.tpl 9.2.0-1/utils/templates/query.py.tpl
--- 8.17.2-2/utils/templates/query.py.tpl	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/templates/query.py.tpl	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,373 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+import collections.abc
+from copy import deepcopy
+from itertools import chain
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    List,
+    Literal,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Protocol,
+    Sequence,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
+from elastic_transport.client_utils import DEFAULT
+
+# 'SF' looks unused but the test suite assumes it's available
+# from this module so others are liable to do so as well.
+from .function import SF  # noqa: F401
+from .function import ScoreFunction
+from .utils import DslBase
+
+if TYPE_CHECKING:
+    from elastic_transport.client_utils import DefaultType
+    from . import types, wrappers
+    from .document_base import InstrumentedField
+
+_T = TypeVar("_T")
+_M = TypeVar("_M", bound=Mapping[str, Any])
+
+
+class QProxiedProtocol(Protocol[_T]):
+    _proxied: _T
+
+
+@overload
+def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ...
+
+
+@overload
+def Q(name_or_query: "Query") -> "Query": ...
+
+
+@overload
+def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ...
+
+
+@overload
+def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ...
+
+
+def Q(
+    name_or_query: Union[
+        str,
+        "Query",
+        QProxiedProtocol[_T],
+        MutableMapping[str, _M],
+    ] = "match_all",
+    **params: Any,
+) -> Union["Query", _T]:
+    # {"match": {"title": "python"}}
+    if isinstance(name_or_query, collections.abc.MutableMapping):
+        if params:
+            raise ValueError("Q() cannot accept parameters when passing in a dict.")
+        if len(name_or_query) != 1:
+            raise ValueError(
+                'Q() can only accept dict with a single query ({"match": {...}}). '
+                "Instead it got (%r)" % name_or_query
+            )
+        name, q_params = deepcopy(name_or_query).popitem()
+        return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params)
+
+    # MatchAll()
+    if isinstance(name_or_query, Query):
+        if params:
+            raise ValueError(
+                "Q() cannot accept parameters when passing in a Query object."
+            )
+        return name_or_query
+
+    # s.query = Q('filtered', query=s.query)
+    if hasattr(name_or_query, "_proxied"):
+        return cast(QProxiedProtocol[_T], name_or_query)._proxied
+
+    # "match", title="python"
+    return Query.get_dsl_class(name_or_query)(**params)
+
+
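The overloads above cover the three ways Q() is invoked: a query name plus parameters, a raw single-key dict, or an existing Query object. A short hedged demonstration (no cluster required), assuming Q is re-exported from elasticsearch.dsl as in the 9.x client:

    from elasticsearch.dsl import Q

    # name plus parameters
    q1 = Q("match", title="python")

    # a single-key dict, exactly as it would appear in a request body
    q2 = Q({"match": {"title": "python"}})

    # an existing Query instance is passed through unchanged
    assert Q(q1) is q1
    assert q1 == q2
    print(q1.to_dict())  # {'match': {'title': 'python'}}

    # mixing a dict with extra keyword parameters is rejected
    try:
        Q({"match": {"title": "python"}}, boost=2)
    except ValueError as exc:
        print(exc)
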
+class Query(DslBase):
+    _type_name = "query"
+    _type_shortcut = staticmethod(Q)
+    name: ClassVar[Optional[str]] = None
+
+    # Add type annotations for methods not defined in every subclass
+    __ror__: ClassVar[Callable[["Query", "Query"], "Query"]]
+    __radd__: ClassVar[Callable[["Query", "Query"], "Query"]]
+    __rand__: ClassVar[Callable[["Query", "Query"], "Query"]]
+
+    def __add__(self, other: "Query") -> "Query":
+        # make sure we give queries that know how to combine themselves
+        # preference
+        if hasattr(other, "__radd__"):
+            return other.__radd__(self)
+        return Bool(must=[self, other])
+
+    def __invert__(self) -> "Query":
+        return Bool(must_not=[self])
+
+    def __or__(self, other: "Query") -> "Query":
+        # make sure we give queries that know how to combine themselves
+        # preference
+        if hasattr(other, "__ror__"):
+            return other.__ror__(self)
+        return Bool(should=[self, other])
+
+    def __and__(self, other: "Query") -> "Query":
+        # make sure we give queries that know how to combine themselves
+        # preference
+        if hasattr(other, "__rand__"):
+            return other.__rand__(self)
+        return Bool(must=[self, other])
+
+
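The operator overloads on the Query base class are what make bare queries composable: & and + fold both operands into a Bool must, | into a Bool should, and ~ into a Bool must_not, unless the right-hand side knows how to combine itself via __rand__/__ror__/__radd__. A hedged illustration:

    from elasticsearch.dsl import Q

    term_a = Q("term", tags="a")
    term_b = Q("term", tags="b")

    print((term_a & term_b).to_dict())
    # {'bool': {'must': [{'term': {'tags': 'a'}}, {'term': {'tags': 'b'}}]}}
    print((term_a | term_b).to_dict())
    # {'bool': {'should': [{'term': {'tags': 'a'}}, {'term': {'tags': 'b'}}]}}
    print((~term_a).to_dict())
    # {'bool': {'must_not': [{'term': {'tags': 'a'}}]}}
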
+{% for k in classes %}
+class {{ k.name }}({{ parent }}):
+    """
+    {% for line in k.docstring %}
+    {{ line }}
+    {% endfor %}
+    {% if k.args %}
+        {% if k.docstring %}
+
+        {% endif %}
+        {% for kwarg in k.args %}
+            {% for line in kwarg.doc %}
+    {{ line }}
+            {% endfor %}
+        {% endfor %}
+    {% endif %}
+    """
+    name = "{{ k.property_name }}"
+    {% if k.params %}
+    _param_defs = {
+    {% for param in k.params %}
+        "{{ param.name }}": {{ param.param }},
+    {% endfor %}
+    {% if k.name == "FunctionScore" %}
+        {# The FunctionScore class implements a custom solution for the `functions`
+           shortcut property. Until the code generator can support shortcut
+           properties directly that solution is added here #}
+        "filter": {"type": "query"},
+    {% endif %}
+    }
+    {% endif %}
+
+    def __init__(
+        self,
+        {% for arg in k.args %}
+            {% if arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+            {% endif %}
+        {% endfor %}
+        {% if k.args and not k.args[-1].positional %}
+        *,
+        {% endif %}
+        {% for arg in k.args %}
+            {% if not arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+            {% endif %}
+        {% endfor %}
+        **kwargs: Any
+    ):
+        {% if k.name == "FunctionScore" %}
+            {# continuation of the FunctionScore shortcut property support from above #}
+        if functions is DEFAULT:
+            functions = []
+            for name in ScoreFunction._classes:
+                if name in kwargs:
+                    functions.append({name: kwargs.pop(name)})  # type: ignore[arg-type]
+        {% elif k.is_single_field %}
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+        {% elif k.is_multi_field %}
+        if _fields is not DEFAULT:
+            for field, value in _fields.items():
+                kwargs[str(field)] = value
+        {% endif %}
+        super().__init__(
+            {% for arg in k.args %}
+                {% if not arg.positional %}
+            {{ arg.name }}={{ arg.name }},
+                {% endif %}
+            {% endfor %}
+            **kwargs
+        )
+
+    {# what follows is a set of Pythonic enhancements to some of the query classes
+       which are outside the scope of the code generator #}
+    {% if k.name == "MatchAll" %}
+    def __add__(self, other: "Query") -> "Query":
+        return other._clone()
+
+    __and__ = __rand__ = __radd__ = __add__
+
+    def __or__(self, other: "Query") -> "MatchAll":
+        return self
+
+    __ror__ = __or__
+
+    def __invert__(self) -> "MatchNone":
+        return MatchNone()
+
+
+EMPTY_QUERY = MatchAll()
+
+    {% elif k.name == "MatchNone" %}
+    def __add__(self, other: "Query") -> "MatchNone":
+        return self
+
+    __and__ = __rand__ = __radd__ = __add__
+
+    def __or__(self, other: "Query") -> "Query":
+        return other._clone()
+
+    __ror__ = __or__
+
+    def __invert__(self) -> MatchAll:
+        return MatchAll()
+
+    {% elif k.name == "Bool" %}
+    def __add__(self, other: Query) -> "Bool":
+        q = self._clone()
+        if isinstance(other, Bool):
+            q.must += other.must
+            q.should += other.should
+            q.must_not += other.must_not
+            q.filter += other.filter
+        else:
+            q.must.append(other)
+        return q
+
+    __radd__ = __add__
+
+    def __or__(self, other: Query) -> Query:
+        for q in (self, other):
+            if isinstance(q, Bool) and not any(
+                (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None))
+            ):
+                other = self if q is other else other
+                q = q._clone()
+                if isinstance(other, Bool) and not any(
+                    (
+                        other.must,
+                        other.must_not,
+                        other.filter,
+                        getattr(other, "minimum_should_match", None),
+                    )
+                ):
+                    q.should.extend(other.should)
+                else:
+                    q.should.append(other)
+                return q
+
+        return Bool(should=[self, other])
+
+    __ror__ = __or__
+
+    @property
+    def _min_should_match(self) -> int:
+        return getattr(
+            self,
+            "minimum_should_match",
+            0 if not self.should or (self.must or self.filter) else 1,
+        )
+
+    def __invert__(self) -> Query:
+        # Because an empty Bool query is treated like
+        # MatchAll the inverse should be MatchNone
+        if not any(chain(self.must, self.filter, self.should, self.must_not)):
+            return MatchNone()
+
+        negations: List[Query] = []
+        for q in chain(self.must, self.filter):
+            negations.append(~q)
+
+        for q in self.must_not:
+            negations.append(q)
+
+        if self.should and self._min_should_match:
+            negations.append(Bool(must_not=self.should[:]))
+
+        if len(negations) == 1:
+            return negations[0]
+        return Bool(should=negations)
+
+    def __and__(self, other: Query) -> Query:
+        q = self._clone()
+        if isinstance(other, Bool):
+            q.must += other.must
+            q.must_not += other.must_not
+            q.filter += other.filter
+            q.should = []
+
+            # reset minimum_should_match as it will get calculated below
+            if "minimum_should_match" in q._params:
+                del q._params["minimum_should_match"]
+
+            for qx in (self, other):
+                min_should_match = qx._min_should_match
+                # TODO: percentages or negative numbers will fail here
+                # for now we report an error
+                if not isinstance(min_should_match, int) or min_should_match < 0:
+                    raise ValueError(
+                        "Can only combine queries with positive integer values for minimum_should_match"
+                    )
+                # all subqueries are required
+                if len(qx.should) <= min_should_match:
+                    q.must.extend(qx.should)
+                # not all of them are required, use it and remember min_should_match
+                elif not q.should:
+                    q.minimum_should_match = min_should_match
+                    q.should = qx.should
+                # all queries are optional, just extend should
+                elif q._min_should_match == 0 and min_should_match == 0:
+                    q.should.extend(qx.should)
+                # not all are required, add a should list to the must with proper min_should_match
+                else:
+                    q.must.append(
+                        Bool(should=qx.should, minimum_should_match=min_should_match)
+                    )
+        else:
+            if not (q.must or q.filter) and q.should:
+                q._params.setdefault("minimum_should_match", 1)
+            q.must.append(other)
+        return q
+
+    __rand__ = __and__
+
+    {% elif k.name == "Terms" %}
+    def _setattr(self, name: str, value: Any) -> None:
+        # here we convert any iterables that are not strings to lists
+        if hasattr(value, "__iter__") and not isinstance(value, (str, list, dict)):
+            value = list(value)
+        super()._setattr(name, value)
+
+    {% endif %}
+
+{% endfor %}
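Most of the hand-written logic in this template lives in the Bool enhancements above; the __and__ branch in particular has to preserve minimum_should_match semantics when merging should clauses. A hedged sketch of that behavior (no cluster required):

    from elasticsearch.dsl import Q

    should_only = Q("bool", should=[Q("term", tags="a"), Q("term", tags="b")])
    published = Q("term", published=True)

    # ANDing a new clause into a should-only Bool keeps the should list
    # meaningful by pinning minimum_should_match to 1.
    combined = (should_only & published).to_dict()["bool"]
    assert combined["minimum_should_match"] == 1
    assert {"term": {"published": True}} in combined["must"]

    # An empty Bool behaves like MatchAll, so its inverse is MatchNone.
    assert (~Q("bool")).to_dict() == {"match_none": {}}
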
diff -pruN 8.17.2-2/utils/templates/response.__init__.py.tpl 9.2.0-1/utils/templates/response.__init__.py.tpl
--- 8.17.2-2/utils/templates/response.__init__.py.tpl	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/templates/response.__init__.py.tpl	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,225 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Generic,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+    cast,
+)
+
+from ..utils import _R, AttrDict, AttrList, _wrap
+from .hit import Hit, HitMeta
+
+if TYPE_CHECKING:
+    from ..aggs import Agg
+    from ..faceted_search_base import FacetedSearchBase
+    from ..search_base import Request, SearchBase
+    from ..update_by_query_base import UpdateByQueryBase
+    from .. import types
+
+__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta", "AggregateResponseType"]
+
+
+class Response(AttrDict[Any], Generic[_R]):
+    """An Elasticsearch search response.
+
+    {% for arg in response.args %}
+        {% for line in arg.doc %}
+    {{ line }}
+        {% endfor %}
+    {% endfor %}
+    """
+    _search: "SearchBase[_R]"
+    _faceted_search: "FacetedSearchBase[_R]"
+    _doc_class: Optional[_R]
+    _hits: List[_R]
+
+    {% for arg in response.args %}
+        {% if arg.name not in ["hits", "aggregations"] %}
+    {{ arg.name }}: {{ arg.type }}
+        {% endif %}
+    {% endfor %}
+
+    def __init__(
+        self,
+        search: "Request[_R]",
+        response: Dict[str, Any],
+        doc_class: Optional[_R] = None,
+    ):
+        super(AttrDict, self).__setattr__("_search", search)
+        super(AttrDict, self).__setattr__("_doc_class", doc_class)
+        super().__init__(response)
+
+    def __iter__(self) -> Iterator[_R]:  # type: ignore[override]
+        return iter(self.hits)
+
+    def __getitem__(self, key: Union[slice, int, str]) -> Any:
+        if isinstance(key, (slice, int)):
+            # for slicing etc
+            return self.hits[key]
+        return super().__getitem__(key)
+
+    def __nonzero__(self) -> bool:
+        return bool(self.hits)
+
+    __bool__ = __nonzero__
+
+    def __repr__(self) -> str:
+        return "<Response: %r>" % (self.hits or self.aggregations)
+
+    def __len__(self) -> int:
+        return len(self.hits)
+
+    def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]:  # type: ignore[override]
+        return self._d_, self._search, self._doc_class
+
+    def __setstate__(
+        self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]  # type: ignore[override]
+    ) -> None:
+        super(AttrDict, self).__setattr__("_d_", state[0])
+        super(AttrDict, self).__setattr__("_search", state[1])
+        super(AttrDict, self).__setattr__("_doc_class", state[2])
+
+    def success(self) -> bool:
+        return self._shards.total == self._shards.successful and not self.timed_out
+
+    @property
+    def hits(self) -> List[_R]:
+        if not hasattr(self, "_hits"):
+            h = cast(AttrDict[Any], self._d_["hits"])
+
+            try:
+                hits = AttrList(list(map(self._search._get_result, h["hits"])))
+            except AttributeError as e:
+                # avoid raising AttributeError since it will be hidden by the property
+                raise TypeError("Could not parse hits.", e)
+
+            # avoid assigning _hits into self._d_
+            super(AttrDict, self).__setattr__("_hits", hits)
+            for k in h:
+                setattr(self._hits, k, _wrap(h[k]))
+        return self._hits
+
+    @property
+    def aggregations(self) -> "AggResponse[_R]":
+        return self.aggs
+
+    @property
+    def aggs(self) -> "AggResponse[_R]":
+        if not hasattr(self, "_aggs"):
+            aggs = AggResponse[_R](
+                cast("Agg[_R]", self._search.aggs),
+                self._search,
+                cast(Dict[str, Any], self._d_.get("aggregations", {})),
+            )
+
+            # avoid assigning _aggs into self._d_
+            super(AttrDict, self).__setattr__("_aggs", aggs)
+        return cast("AggResponse[_R]", self._aggs)
+
+    def search_after(self) -> "SearchBase[_R]":
+        """
+        Return a ``Search`` instance that retrieves the next page of results.
+
+        This method provides an easy way to paginate a long list of results using
+        the ``search_after`` option. For example::
+
+            page_size = 20
+            s = Search()[:page_size].sort("date")
+
+            while True:
+                # get a page of results
+                r = await s.execute()
+
+                # do something with this page of results
+
+                # exit the loop if we reached the end
+                if len(r.hits) < page_size:
+                    break
+
+                # get a search object with the next page of results
+                s = r.search_after()
+
+        Note that the ``search_after`` option requires the search to have an
+        explicit ``sort`` order.
+        """
+        if len(self.hits) == 0:
+            raise ValueError("Cannot use search_after when there are no search results")
+        if not hasattr(self.hits[-1].meta, "sort"):  # type: ignore[attr-defined]
+            raise ValueError("Cannot use search_after when results are not sorted")
+        return self._search.extra(search_after=self.hits[-1].meta.sort)  # type: ignore[attr-defined]
+
+
+AggregateResponseType = {{ response["aggregate_type"] }}
+
+
+class AggResponse(AttrDict[Any], Generic[_R]):
+    """An Elasticsearch aggregation response."""
+    _meta: Dict[str, Any]
+
+    def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]):
+        super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs})
+        super().__init__(data)
+
+    def __getitem__(self, attr_name: str) -> AggregateResponseType:
+        if attr_name in self._meta["aggs"]:
+            # don't do self._meta['aggs'][attr_name] to avoid copying
+            agg = self._meta["aggs"].aggs[attr_name]
+            return cast(AggregateResponseType, agg.result(self._meta["search"], self._d_[attr_name]))
+        return super().__getitem__(attr_name)  # type: ignore[no-any-return]
+
+    def __iter__(self) -> Iterator[AggregateResponseType]:  # type: ignore[override]
+        for name in self._meta["aggs"]:
+            yield self[name]
+
+
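Because Response and AggResponse only wrap a raw response body, their behavior can be exercised against a hand-written dict without a cluster. A hedged sketch, assuming the rendered module is importable as elasticsearch.dsl.response in the 9.x client:

    from elasticsearch.dsl import Search
    from elasticsearch.dsl.response import Response

    s = Search()
    s.aggs.bucket("per_tag", "terms", field="tags")

    body = {
        "took": 3,
        "timed_out": False,
        "_shards": {"total": 1, "successful": 1, "skipped": 0, "failed": 0},
        "hits": {
            "total": {"value": 1, "relation": "eq"},
            "max_score": 1.0,
            "hits": [
                {"_index": "blog", "_id": "1", "_score": 1.0,
                 "_source": {"title": "python"}},
            ],
        },
        "aggregations": {"per_tag": {"buckets": [{"key": "dsl", "doc_count": 1}]}},
    }

    r = Response(s, body)
    assert r.success()                     # all shards succeeded, not timed out
    assert len(r) == 1 and r.hits[0].title == "python"
    assert r.aggregations.per_tag.buckets[0].key == "dsl"
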
+class UpdateByQueryResponse(AttrDict[Any], Generic[_R]):
+    """An Elasticsearch update by query response.
+
+    {% for arg in ubq_response.args %}
+        {% for line in arg.doc %}
+    {{ line }}
+        {% endfor %}
+    {% endfor %}
+    """
+    _search: "UpdateByQueryBase[_R]"
+
+    {% for arg in ubq_response.args %}
+    {{ arg.name }}: {{ arg.type }}
+    {% endfor %}
+
+    def __init__(
+        self,
+        search: "Request[_R]",
+        response: Dict[str, Any],
+        doc_class: Optional[_R] = None,
+    ):
+        super(AttrDict, self).__setattr__("_search", search)
+        super(AttrDict, self).__setattr__("_doc_class", doc_class)
+        super().__init__(response)
+
+    def success(self) -> bool:
+        return not self.timed_out and not self.failures
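UpdateByQueryResponse follows the same pattern and can likewise be built from a plain dict; success() only inspects timed_out and failures. A hedged sketch under the same import assumptions:

    from elasticsearch.dsl import UpdateByQuery
    from elasticsearch.dsl.response import UpdateByQueryResponse

    ubq = UpdateByQuery()
    resp = UpdateByQueryResponse(
        ubq, {"took": 12, "timed_out": False, "updated": 3, "failures": []}
    )
    assert resp.success()
    assert resp.updated == 3
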
diff -pruN 8.17.2-2/utils/templates/types.py.tpl 9.2.0-1/utils/templates/types.py.tpl
--- 8.17.2-2/utils/templates/types.py.tpl	1970-01-01 00:00:00.000000000 +0000
+++ 9.2.0-1/utils/templates/types.py.tpl	2025-10-28 16:50:53.000000000 +0000
@@ -0,0 +1,107 @@
+#  Licensed to Elasticsearch B.V. under one or more contributor
+#  license agreements. See the NOTICE file distributed with
+#  this work for additional information regarding copyright
+#  ownership. Elasticsearch B.V. licenses this file to you under
+#  the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+# 	http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing,
+#  software distributed under the License is distributed on an
+#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+#  KIND, either express or implied.  See the License for the
+#  specific language governing permissions and limitations
+#  under the License.
+
+from typing import Any, Dict, Literal, Mapping, Sequence, Union
+
+from elastic_transport.client_utils import DEFAULT, DefaultType
+
+from . import Query
+from .document_base import InstrumentedField
+from .utils import AttrDict
+
+PipeSeparatedFlags = str
+
+
+{% for k in classes %}
+class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}):
+    {% if k.docstring or k.args %}
+    """
+        {% for line in k.docstring %}
+    {{ line }}
+        {% endfor %}
+        {% if k.args %}
+            {% if k.docstring %}
+
+            {% endif %}
+        {% endif %}
+        {% for arg in k.args %}
+            {% for line in arg.doc %}
+    {{ line }}
+            {% endfor %}
+        {% endfor %}
+    """
+        {% for arg in k.args %}
+            {% if arg.name not in ["keys", "items"] %}
+    {{ arg.name }}: {{ arg.type }}
+            {% else %}
+    {{ arg.name }}: {{ arg.type }}  # type: ignore[assignment]
+            {% endif %}
+        {% endfor %}
+        {% if not k.for_response %}
+
+    def __init__(
+        self,
+            {% for arg in k.args %}
+                {% if arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+                {% endif %}
+            {% endfor %}
+            {% if k.args and not k.args[-1].positional %}
+        *,
+            {% endif %}
+            {% for arg in k.args %}
+                {% if not arg.positional %}
+        {{ arg.name }}: {{ arg.type }} = DEFAULT,
+                {% endif %}
+            {% endfor %}
+        **kwargs: Any
+    ):
+            {% if k.is_single_field %}
+        if _field is not DEFAULT:
+            kwargs[str(_field)] = _value
+            {% elif k.is_multi_field %}
+        if _fields is not DEFAULT:
+            for field, value in _fields.items():
+                kwargs[str(field)] = value
+            {% endif %}
+            {% for arg in k.args %}
+                {% if not arg.positional %}
+        if {{ arg.name }} is not DEFAULT:
+                    {% if "InstrumentedField" in arg.type %}
+            kwargs["{{ arg.name }}"] = str({{ arg.name }})
+                    {% else %}
+            kwargs["{{ arg.name }}"] = {{ arg.name }}
+                    {% endif %}
+                {% endif %}
+            {% endfor %}
+            {% if k.parent %}
+        super().__init__(**kwargs)
+            {% else %}
+        super().__init__(kwargs)
+            {% endif %}
+        {% endif %}
+        {% if k.buckets_as_dict %}
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, {{ k.buckets_as_dict }}]:
+        return self.buckets  # type: ignore[return-value]
+        {% endif %}
+    {% else %}
+    pass
+    {% endif %}
+
+{% endfor %}
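The types template mostly stamps out typed AttrDict wrappers whose __init__ folds keyword arguments (and the _field/_value shortcut for single-field interfaces) into the underlying dict. As an illustration, this is roughly what the loop would emit for a hypothetical single-field entry; the class name and arguments are invented, only the imports mirror what the template header pulls in, and the real generated classes live in elasticsearch.dsl.types:

    from typing import Any, Union

    from elastic_transport.client_utils import DEFAULT, DefaultType

    from elasticsearch.dsl.utils import AttrDict


    # Hypothetical expansion of the loop body for an is_single_field entry.
    class ExampleSingleField(AttrDict[Any]):
        """
        :arg _field: the target field name
        :arg _value: the value associated with the field
        """

        def __init__(
            self,
            _field: Union[str, DefaultType] = DEFAULT,
            _value: Any = DEFAULT,
            **kwargs: Any,
        ):
            # the is_single_field branch folds the field/value pair into kwargs
            if _field is not DEFAULT:
                kwargs[str(_field)] = _value
            # classes without a parent hand the collected kwargs to AttrDict
            super().__init__(kwargs)


    opts = ExampleSingleField("title", {"query": "python"})
    print(opts["title"])  # {'query': 'python'}
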
