diff -pruN 8.0.21-1/.github/workflows/docs-build.yml 9.0.0-1/.github/workflows/docs-build.yml
--- 8.0.21-1/.github/workflows/docs-build.yml	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/.github/workflows/docs-build.yml	2025-10-03 20:58:42.000000000 +0000
@@ -17,4 +17,4 @@ jobs:
       deployments: write
       id-token: write
       contents: read
-      pull-requests: read
+      pull-requests: write
diff -pruN 8.0.21-1/Dockerfile 9.0.0-1/Dockerfile
--- 8.0.21-1/Dockerfile	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/Dockerfile	2025-10-03 20:58:42.000000000 +0000
@@ -1,6 +1,6 @@
 # syntax=docker/dockerfile:1
-ARG PYVER=3.12.9
-ARG ALPTAG=3.21
+ARG PYVER=3.13.7
+ARG ALPTAG=3.22
 FROM python:${PYVER}-alpine${ALPTAG} AS builder
 
 # Add the community repo for access to patchelf binary package
diff -pruN 8.0.21-1/curator/_version.py 9.0.0-1/curator/_version.py
--- 8.0.21-1/curator/_version.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/_version.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,3 +1,3 @@
 """Curator Version"""
 
-__version__ = '8.0.21'
+__version__ = '9.0.0'
diff -pruN 8.0.21-1/curator/actions/close.py 9.0.0-1/curator/actions/close.py
--- 8.0.21-1/curator/actions/close.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/actions/close.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,7 +2,7 @@
 
 import logging
 import warnings
-from elasticsearch8.exceptions import ElasticsearchWarning
+from elasticsearch9.exceptions import ElasticsearchWarning
 from curator.helpers.testers import verify_index_list
 from curator.helpers.utils import chunk_index_list, report_failure, show_dry_run, to_csv
 
diff -pruN 8.0.21-1/curator/actions/create_index.py 9.0.0-1/curator/actions/create_index.py
--- 8.0.21-1/curator/actions/create_index.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/actions/create_index.py	2025-10-03 20:58:42.000000000 +0000
@@ -3,7 +3,7 @@
 import logging
 
 # pylint: disable=import-error, broad-except
-from elasticsearch8.exceptions import RequestError
+from elasticsearch9.exceptions import RequestError
 from curator.exceptions import ConfigurationError, FailedExecution
 from curator.helpers.date_ops import parse_date_pattern
 from curator.helpers.utils import report_failure
diff -pruN 8.0.21-1/curator/actions/reindex.py 9.0.0-1/curator/actions/reindex.py
--- 8.0.21-1/curator/actions/reindex.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/actions/reindex.py	2025-10-03 20:58:42.000000000 +0000
@@ -6,7 +6,7 @@ from dotmap import DotMap  # type: ignor
 
 # pylint: disable=broad-except, R0902,R0912,R0913,R0914,R0915
 from es_client.builder import Builder
-from es_client.helpers.utils import ensure_list, verify_url_schema
+from es_client.utils import ensure_list, verify_url_schema
 from es_client.exceptions import ConfigurationError
 from curator.exceptions import CuratorException, FailedExecution, NoIndices
 
diff -pruN 8.0.21-1/curator/actions/snapshot.py 9.0.0-1/curator/actions/snapshot.py
--- 8.0.21-1/curator/actions/snapshot.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/actions/snapshot.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,7 +2,7 @@
 
 import logging
 import re
-from es_client.helpers.utils import ensure_list
+from es_client.utils import ensure_list
 from curator.helpers.date_ops import parse_datemath, parse_date_pattern
 from curator.helpers.getters import get_indices
 from curator.helpers.testers import (
diff -pruN 8.0.21-1/curator/classdef.py 9.0.0-1/curator/classdef.py
--- 8.0.21-1/curator/classdef.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/classdef.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,8 +2,8 @@
 
 import logging
 from es_client.exceptions import FailedValidation
-from es_client.helpers.schemacheck import password_filter
-from es_client.helpers.utils import get_yaml
+from es_client.schemacheck import password_filter
+from es_client.utils import get_yaml
 from curator import IndexList, SnapshotList
 from curator.actions import CLASS_MAP
 from curator.exceptions import ConfigurationError
diff -pruN 8.0.21-1/curator/cli.py 9.0.0-1/curator/cli.py
--- 8.0.21-1/curator/cli.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/cli.py	2025-10-03 20:58:42.000000000 +0000
@@ -4,7 +4,7 @@ import sys
 import logging
 import click
 from es_client.defaults import OPTION_DEFAULTS
-from es_client.helpers.config import (
+from es_client.config import (
     cli_opts,
     context_settings,
     generate_configdict,
@@ -12,8 +12,8 @@ from es_client.helpers.config import (
     get_config,
     options_from_dict,
 )
-from es_client.helpers.logging import configure_logging
-from es_client.helpers.utils import option_wrapper, prune_nones
+from es_client.logging import configure_logging
+from es_client.utils import option_wrapper, prune_nones
 from curator.exceptions import ClientException
 from curator.classdef import ActionsFile
 from curator.defaults.settings import (
diff -pruN 8.0.21-1/curator/cli_singletons/object_class.py 9.0.0-1/curator/cli_singletons/object_class.py
--- 8.0.21-1/curator/cli_singletons/object_class.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/cli_singletons/object_class.py	2025-10-03 20:58:42.000000000 +0000
@@ -6,8 +6,8 @@ import sys
 from voluptuous import Schema
 from es_client.builder import Builder
 from es_client.exceptions import FailedValidation
-from es_client.helpers.schemacheck import SchemaCheck
-from es_client.helpers.utils import prune_nones
+from es_client.schemacheck import SchemaCheck
+from es_client.utils import prune_nones
 from curator import IndexList, SnapshotList
 from curator.actions import (
     Alias,
diff -pruN 8.0.21-1/curator/cli_singletons/rollover.py 9.0.0-1/curator/cli_singletons/rollover.py
--- 8.0.21-1/curator/cli_singletons/rollover.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/cli_singletons/rollover.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,7 +1,7 @@
 """Index Rollover Singleton"""
 
 import click
-from es_client.helpers.utils import prune_nones
+from es_client.utils import prune_nones
 from curator.cli_singletons.object_class import CLIAction
 from curator.cli_singletons.utils import json_to_dict
 
diff -pruN 8.0.21-1/curator/cli_singletons/utils.py 9.0.0-1/curator/cli_singletons/utils.py
--- 8.0.21-1/curator/cli_singletons/utils.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/cli_singletons/utils.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,7 +2,7 @@
 
 import json
 from click import BadParameter
-from es_client.helpers.utils import ensure_list
+from es_client.utils import ensure_list
 
 
 # Click functions require ctx and param to be passed positionally even if not used
diff -pruN 8.0.21-1/curator/defaults/settings.py 9.0.0-1/curator/defaults/settings.py
--- 8.0.21-1/curator/defaults/settings.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/defaults/settings.py	2025-10-03 20:58:42.000000000 +0000
@@ -15,8 +15,8 @@ EXCLUDE_SYSTEM = (
     '-.kibana*,-.security*,-.watch*,-.triggered_watch*,'
     '-.ml*,-.geoip_databases*,-.logstash*,-.tasks*'
 )
-VERSION_MIN = (7, 14, 0)
-VERSION_MAX = (8, 99, 99)
+VERSION_MIN = (9, 0, 0)
+VERSION_MAX = (9, 99, 99)
 
 # Click specifics
 
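Note: the ``VERSION_MIN``/``VERSION_MAX`` bump above is what restricts Curator 9.x to Elasticsearch 9 clusters. A minimal sketch of how such tuple bounds compare against a parsed (major, minor, patch) version; the helper name below is hypothetical and not Curator's actual check:

```python
# Hypothetical sketch: tuple bounds like VERSION_MIN/VERSION_MAX compare
# lexicographically against a parsed (major, minor, patch) version tuple.
VERSION_MIN = (9, 0, 0)
VERSION_MAX = (9, 99, 99)

def version_in_range(version: tuple[int, int, int]) -> bool:
    """Return True if the connected Elasticsearch version is acceptable."""
    return VERSION_MIN <= version <= VERSION_MAX

assert version_in_range((9, 1, 0))
assert not version_in_range((8, 17, 2))
```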
diff -pruN 8.0.21-1/curator/helpers/date_ops.py 9.0.0-1/curator/helpers/date_ops.py
--- 8.0.21-1/curator/helpers/date_ops.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/helpers/date_ops.py	2025-10-03 20:58:42.000000000 +0000
@@ -6,7 +6,7 @@ import re
 import string
 import time
 from datetime import timedelta, datetime, timezone
-from elasticsearch8.exceptions import NotFoundError
+from elasticsearch9.exceptions import NotFoundError
 from curator.exceptions import ConfigurationError
 from curator.defaults.settings import date_regex
 
diff -pruN 8.0.21-1/curator/helpers/getters.py 9.0.0-1/curator/helpers/getters.py
--- 8.0.21-1/curator/helpers/getters.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/helpers/getters.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,7 +1,7 @@
 """Utility functions that get things"""
 
 import logging
-from elasticsearch8 import exceptions as es8exc
+from elasticsearch9 import exceptions as es8exc
 from curator.defaults.settings import EXCLUDE_SYSTEM
 from curator.exceptions import (
     ConfigurationError,
diff -pruN 8.0.21-1/curator/helpers/testers.py 9.0.0-1/curator/helpers/testers.py
--- 8.0.21-1/curator/helpers/testers.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/helpers/testers.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,10 +2,10 @@
 
 import logging
 from voluptuous import Schema
-from elasticsearch8 import Elasticsearch
-from elasticsearch8.exceptions import NotFoundError
-from es_client.helpers.schemacheck import SchemaCheck
-from es_client.helpers.utils import prune_nones
+from elasticsearch9 import Elasticsearch
+from elasticsearch9.exceptions import NotFoundError
+from es_client.schemacheck import SchemaCheck
+from es_client.utils import prune_nones
 from curator.helpers.getters import get_repository, get_write_index
 from curator.exceptions import (
     ConfigurationError,
diff -pruN 8.0.21-1/curator/helpers/utils.py 9.0.0-1/curator/helpers/utils.py
--- 8.0.21-1/curator/helpers/utils.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/helpers/utils.py	2025-10-03 20:58:42.000000000 +0000
@@ -5,7 +5,7 @@ The kind that don't fit in testers, gett
 
 import re
 import logging
-from es_client.helpers.utils import ensure_list
+from es_client.utils import ensure_list
 from curator.exceptions import FailedExecution
 
 logger = logging.getLogger(__name__)
diff -pruN 8.0.21-1/curator/helpers/waiters.py 9.0.0-1/curator/helpers/waiters.py
--- 8.0.21-1/curator/helpers/waiters.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/helpers/waiters.py	2025-10-03 20:58:42.000000000 +0000
@@ -7,7 +7,7 @@ import logging
 import warnings
 from time import localtime, sleep, strftime
 from datetime import datetime
-from elasticsearch8.exceptions import GeneralAvailabilityWarning
+from elasticsearch9.exceptions import GeneralAvailabilityWarning
 from curator.exceptions import (
     ActionTimeout,
     ConfigurationError,
diff -pruN 8.0.21-1/curator/indexlist.py 9.0.0-1/curator/indexlist.py
--- 8.0.21-1/curator/indexlist.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/indexlist.py	2025-10-03 20:58:42.000000000 +0000
@@ -4,9 +4,9 @@
 import re
 import itertools
 import logging
-from elasticsearch8.exceptions import NotFoundError, TransportError
-from es_client.helpers.schemacheck import SchemaCheck
-from es_client.helpers.utils import ensure_list
+from elasticsearch9.exceptions import NotFoundError, TransportError
+from es_client.schemacheck import SchemaCheck
+from es_client.utils import ensure_list
 from curator.defaults import settings
 from curator.exceptions import (
     ActionError,
diff -pruN 8.0.21-1/curator/repomgrcli.py 9.0.0-1/curator/repomgrcli.py
--- 8.0.21-1/curator/repomgrcli.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/repomgrcli.py	2025-10-03 20:58:42.000000000 +0000
@@ -4,17 +4,17 @@ import sys
 import logging
 import pprint
 import click
-from elasticsearch8 import ApiError, NotFoundError
+from elasticsearch9 import ApiError, NotFoundError
 from es_client.defaults import LOGGING_SETTINGS, SHOW_OPTION
 from es_client.builder import Builder
-from es_client.helpers.config import (
+from es_client.config import (
     cli_opts,
     context_settings,
     generate_configdict,
     get_config,
 )
-from es_client.helpers.logging import configure_logging
-from es_client.helpers.utils import option_wrapper
+from es_client.logging import configure_logging
+from es_client.utils import option_wrapper
 from curator.defaults.settings import (
     CLICK_DRYRUN,
     VERSION_MAX,
diff -pruN 8.0.21-1/curator/singletons.py 9.0.0-1/curator/singletons.py
--- 8.0.21-1/curator/singletons.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/singletons.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,15 +2,15 @@
 
 import click
 from es_client.defaults import SHOW_EVERYTHING
-from es_client.helpers.config import (
+from es_client.config import (
     cli_opts,
     context_settings,
     generate_configdict,
     get_config,
     options_from_dict,
 )
-from es_client.helpers.logging import configure_logging
-from es_client.helpers.utils import option_wrapper
+from es_client.logging import configure_logging
+from es_client.utils import option_wrapper
 from curator.defaults.settings import CLICK_DRYRUN, default_config_file, footer
 from curator._version import __version__
 from curator.cli_singletons import (
diff -pruN 8.0.21-1/curator/snapshotlist.py 9.0.0-1/curator/snapshotlist.py
--- 8.0.21-1/curator/snapshotlist.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/snapshotlist.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,7 +2,7 @@
 
 import re
 import logging
-from es_client.helpers.schemacheck import SchemaCheck
+from es_client.schemacheck import SchemaCheck
 from curator.exceptions import (
     ConfigurationError,
     FailedExecution,
diff -pruN 8.0.21-1/curator/validators/actions.py 9.0.0-1/curator/validators/actions.py
--- 8.0.21-1/curator/validators/actions.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/validators/actions.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,7 +1,7 @@
 """Validate root ``actions`` and individual ``action`` Schemas"""
 
 from voluptuous import Any, In, Schema, Optional, Required
-from es_client.helpers.schemacheck import SchemaCheck
+from es_client.schemacheck import SchemaCheck
 from curator.defaults import settings
 
 
diff -pruN 8.0.21-1/curator/validators/filter_functions.py 9.0.0-1/curator/validators/filter_functions.py
--- 8.0.21-1/curator/validators/filter_functions.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/curator/validators/filter_functions.py	2025-10-03 20:58:42.000000000 +0000
@@ -2,8 +2,8 @@
 
 import logging
 from voluptuous import Any, In, Required, Schema
-from es_client.helpers.schemacheck import SchemaCheck
-from es_client.helpers.utils import prune_nones
+from es_client.schemacheck import SchemaCheck
+from es_client.utils import prune_nones
 from curator.defaults import settings, filtertypes
 from curator.exceptions import ConfigurationError
 
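Note: the import-path changes above follow es_client 9.x flattening its former ``helpers`` sub-package into top-level modules (``es_client.utils``, ``es_client.schemacheck``, ``es_client.config``, ``es_client.logging``). A minimal sketch of a version-tolerant import, assuming the old path exists only in pre-9.0 es_client releases:

```python
# Hypothetical compatibility shim: prefer the es_client 9.x module layout and
# fall back to the pre-9.0 "helpers" location if that is what is installed.
try:
    from es_client.utils import ensure_list, prune_nones
except ImportError:
    from es_client.helpers.utils import ensure_list, prune_nones
```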
diff -pruN 8.0.21-1/debian/changelog 9.0.0-1/debian/changelog
--- 8.0.21-1/debian/changelog	2025-04-02 05:46:15.000000000 +0000
+++ 9.0.0-1/debian/changelog	2025-10-08 10:35:43.000000000 +0000
@@ -1,3 +1,14 @@
+elasticsearch-curator (9.0.0-1) unstable; urgency=medium
+
+  * New upstream version 9.0.0
+  * Rediff patches
+  * d/control: add Breaks: python3-es-client (<< 9.0.0-1~),
+               change to elasticsearch9 (Closes: #1110793)
+  * d/control: Drop Rules-Requires-Root: no,
+               default since dpkg 1.22.13
+
+ -- Karsten Schöke <karsten.schoeke@geobasis-bb.de>  Wed, 08 Oct 2025 12:35:43 +0200
+
 elasticsearch-curator (8.0.21-1) unstable; urgency=medium
 
   * New upstream version 8.0.21
diff -pruN 8.0.21-1/debian/control 9.0.0-1/debian/control
--- 8.0.21-1/debian/control	2025-04-02 05:46:15.000000000 +0000
+++ 9.0.0-1/debian/control	2025-10-08 10:35:43.000000000 +0000
@@ -30,7 +30,6 @@ Standards-Version: 4.7.2
 Homepage: https://github.com/elasticsearch/curator
 Vcs-Browser: https://salsa.debian.org/python-team/packages/elasticsearch-curator
 Vcs-Git: https://salsa.debian.org/python-team/packages/elasticsearch-curator.git
-Rules-Requires-Root: no
 Testsuite: autopkgtest-pkg-pybuild
 
 Package: elasticsearch-curator
@@ -39,6 +38,7 @@ Section: admin
 Depends: python3-elasticsearch-curator (= ${binary:Version}),
          ${misc:Depends},
          ${python3:Depends},
+Breaks: python3-es-client (<< 9.0.0-1~),
 Description: command-line tool for managing Elasticsearch time-series indices
  Elasticsearch Curator helps manage Elasticsearch time-series indices. It
  consists of a Python library and a CLI tool providing a high-level interface
@@ -56,8 +56,9 @@ Package: python3-elasticsearch-curator
 Architecture: all
 Depends: ${misc:Depends},
          ${python3:Depends},
-         python3-elasticsearch8,
+         python3-elasticsearch9,
 Suggests: python-elasticsearch-curator-doc,
+Breaks: python3-es-client (<< 9.0.0-1~),
 Description: Python 3 library for managing Elasticsearch time-series indices
  Elasticsearch Curator helps manage Elasticsearch time-series indices. It
  consists of a Python library and a CLI tool providing a high-level interface
diff -pruN 8.0.21-1/debian/patches/0002-sphinx-use-local-inventory-references-for-intersphin.patch 9.0.0-1/debian/patches/0002-sphinx-use-local-inventory-references-for-intersphin.patch
--- 8.0.21-1/debian/patches/0002-sphinx-use-local-inventory-references-for-intersphin.patch	2025-04-02 05:46:15.000000000 +0000
+++ 9.0.0-1/debian/patches/0002-sphinx-use-local-inventory-references-for-intersphin.patch	2025-10-08 10:31:59.000000000 +0000
@@ -7,28 +7,24 @@ fail if intersphinx was to fetch invento
 
 Closes: #830550
 
-Last-Update: 2025-03-30
 ---
- docs/conf.py | 11 ++++++-----
- 1 file changed, 6 insertions(+), 5 deletions(-)
+Last-Update: 2025-10-08
 
-diff --git a/docs/conf.py b/docs/conf.py
-index 7925847..a78ab50 100644
---- a/docs/conf.py
-+++ b/docs/conf.py
-@@ -71,9 +71,10 @@ if not on_rtd:  # only import and set the theme if we're building docs locally
+--- elasticsearch-curator-9.0.0.orig/docs/conf.py
++++ elasticsearch-curator-9.0.0/docs/conf.py
+@@ -71,9 +71,10 @@ if not on_rtd:  # only import and set th
      html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
- 
+
  intersphinx_mapping = {
--    'python': ('https://docs.python.org/3.12', None),
--    'es_client': ('https://es-client.readthedocs.io/en/v8.17.5', None),
--    'elasticsearch8': ('https://elasticsearch-py.readthedocs.io/en/v8.17.2', None),
+-    'python': ('https://docs.python.org/3.13', None),
+-    'es_client': ('https://es-client.readthedocs.io/en/v9.0.0', None),
+-    'elasticsearch9': ('https://elasticsearch-py.readthedocs.io/en/v9.1.0', None),
 -    'voluptuous': ('http://alecthomas.github.io/voluptuous/docs/_build/html', None),
--    'click': ('https://click.palletsprojects.com/en/8.1.x', None),
-+    'python': ('https://docs.python.org/3.12',
-+               "/usr/share/doc/python3-doc/html/objects.inv"),
-+    'elasticsearch8': ('https://elasticsearch-py.readthedocs.io/en/v8.17.1',
-+                       "/usr/share/doc/python-elasticsearch-doc/html/objects.inv"),
-+    'click': ('https://click.palletsprojects.com/en/8.1.x',
-+              "/usr/share/doc/python-click-doc/html/objects.inv"),
+-    'click': ('https://click.palletsprojects.com/en/stable', None),
++    'python': ('https://docs.python.org/3.13',
++               '/usr/share/doc/python3-doc/html/objects.inv'),
++    'elasticsearch9': ('https://elasticsearch-py.readthedocs.io/en/v9.1.0',
++                       '/usr/share/doc/python-elasticsearch-doc/html/objects.inv'),
++    'click': ('https://click.palletsprojects.com/en/stable',
++              '/usr/share/doc/python-click-doc/html/objects.inv'),
  }
diff -pruN 8.0.21-1/docker_test/scripts/create.sh 9.0.0-1/docker_test/scripts/create.sh
--- 8.0.21-1/docker_test/scripts/create.sh	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docker_test/scripts/create.sh	2025-10-03 20:58:42.000000000 +0000
@@ -25,7 +25,7 @@ REMOTE_PORT=9201
 LOCAL_URL=http://127.0.0.1:${LOCAL_PORT}
 REMOTE_URL=http://127.0.0.1:${REMOTE_PORT}
 
-if [[ "$MAJOR" -eq 8 ]]; then
+if [[ "$MAJOR" -ge 8 ]]; then
   MONITORING="xpack.monitoring.templates.enabled"
   IMAGE=docker.elastic.co/elasticsearch/elasticsearch
 elif [[ "$MAJOR" -eq 7 ]]; then
@@ -153,4 +153,4 @@ for IP in $IPLIST; do
 done
 
 echo
-echo "Ready to test!"
\ No newline at end of file
+echo "Ready to test!"
diff -pruN 8.0.21-1/docs/Changelog.rst 9.0.0-1/docs/Changelog.rst
--- 8.0.21-1/docs/Changelog.rst	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/Changelog.rst	2025-10-03 20:58:42.000000000 +0000
@@ -3,6 +3,40 @@
 Changelog
 =========
 
+[9.0.0] - 2025-10-03
+--------------------
+
+**BREAKING CHANGE**
+
+Curator 9.x requires Python 3.10 or later. Python 3.8 and 3.9 are no longer supported. It has not yet been tested with Python 3.14.
+
+**Announcement**
+
+This is the first major release supporting Elasticsearch 9.x. Only Elasticsearch 9 is supported by this release. If you need to use Curator with Elasticsearch 8.x, please use Curator 8.0.21 or earlier.
+
+All tests pass in Python versions 3.10.13, 3.11.8, 3.12.9, and 3.13.7.
+
+The Docker image is now based on ``python:3.13.7-alpine3.22``. The release candidate was built and tested against an Elastic Cloud instance.
+
+
+
+**Changes**
+
+  * Update to use ``es_client==9.0.0``, which supports Elasticsearch 9.x
+  * Update all references to ``elasticsearch8`` to use ``elasticsearch9``
+  * Remove all references to deprecated ``es_client.helpers`` modules.
+  * Set ``VERSION_MIN`` to ``9.0.0`` and ``VERSION_MAX`` to ``9.99.99`` in ``curator.defaults`` to
+    prevent accidental use of Curator 9.x with Elasticsearch 8.x and earlier.
+  * Allow docker_test/scripts/create.sh to accept ``-ge 8`` versions of Elasticsearch.
+  * Bump versions in docs/asciidoc files.
+  * Remove Python versions 3.8 and 3.9 from the acceptable versions (``requires-python = ">=3.10"``) due to changes in ``click>=8.2.0``
+  * Add Python 3.13 to the supported versions.
+  * Automated linter reformatting of some files, but no code changes.
+  * Fix ``Dockerfile`` to use ``alpine3.22`` with Python 3.13.7
+  * Update ``pyproject.toml`` to reflect new dependencies and versions.
+  * Update tests to reflect changes in ``click==8.3.0``. Mostly using ``result.stdout_bytes`` instead of ``result.output``.
+
+
 8.0.21 (1 April 2025)
 ---------------------
 
diff -pruN 8.0.21-1/docs/asciidoc/filter_elements.asciidoc 9.0.0-1/docs/asciidoc/filter_elements.asciidoc
--- 8.0.21-1/docs/asciidoc/filter_elements.asciidoc	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/asciidoc/filter_elements.asciidoc	2025-10-03 20:58:42.000000000 +0000
@@ -116,7 +116,7 @@ The default value for this setting is `r
 
 
 [[fe_count]]
-== count
+== {es} Curator count filter setting
 
 NOTE: This setting is only used with the <<filtertype_count,count>> filtertype +
     and is a required setting.
diff -pruN 8.0.21-1/docs/asciidoc/index.asciidoc 9.0.0-1/docs/asciidoc/index.asciidoc
--- 8.0.21-1/docs/asciidoc/index.asciidoc	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/asciidoc/index.asciidoc	2025-10-03 20:58:42.000000000 +0000
@@ -1,10 +1,10 @@
-:curator_version: 8.0.21
-:curator_major: 8
-:curator_doc_tree: 8.0
-:es_py_version: 8.17.2
-:es_doc_tree: 8.17
-:stack_doc_tree: 8.17
-:pybuild_ver: 3.12.9
+:curator_version: 9.0.0
+:curator_major: 9
+:curator_doc_tree: 9.0
+:es_py_version: 9.1.0
+:es_doc_tree: 9.1
+:stack_doc_tree: 9.1
+:pybuild_ver: 3.13.7
 :copyright_years: 2011-2025
 :ref:  http://www.elastic.co/guide/en/elasticsearch/reference/{es_doc_tree}
 :esref: http://www.elastic.co/guide/en/elasticsearch/reference/{stack_doc_tree}
diff -pruN 8.0.21-1/docs/conf.py 9.0.0-1/docs/conf.py
--- 8.0.21-1/docs/conf.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/conf.py	2025-10-03 20:58:42.000000000 +0000
@@ -71,9 +71,9 @@ if not on_rtd:  # only import and set th
     html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 
 intersphinx_mapping = {
-    'python': ('https://docs.python.org/3.12', None),
-    'es_client': ('https://es-client.readthedocs.io/en/v8.17.5', None),
-    'elasticsearch8': ('https://elasticsearch-py.readthedocs.io/en/v8.17.2', None),
+    'python': ('https://docs.python.org/3.13', None),
+    'es_client': ('https://es-client.readthedocs.io/en/v9.0.0', None),
+    'elasticsearch9': ('https://elasticsearch-py.readthedocs.io/en/v9.1.0', None),
     'voluptuous': ('http://alecthomas.github.io/voluptuous/docs/_build/html', None),
-    'click': ('https://click.palletsprojects.com/en/8.1.x', None),
+    'click': ('https://click.palletsprojects.com/en/stable', None),
 }
diff -pruN 8.0.21-1/docs/docset.yml 9.0.0-1/docs/docset.yml
--- 8.0.21-1/docs/docset.yml	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/docset.yml	2025-10-03 20:58:42.000000000 +0000
@@ -7,5 +7,6 @@ cross_links:
 toc:
   - toc: reference
 subs:
-  curator_version: 8.0.18
-  curator_major: 8
\ No newline at end of file
+  curator_version: 8.0.21
+  curator_major: 8
+  es:   "Elasticsearch"
\ No newline at end of file
diff -pruN 8.0.21-1/docs/reference/alias.md 9.0.0-1/docs/reference/alias.md
--- 8.0.21-1/docs/reference/alias.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/alias.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/alias.html
+navigation_title: Alias
 ---
 
-# Alias [alias]
+# Alias action in {{es}} Curator [alias]
 
 ```yaml
 action: alias
diff -pruN 8.0.21-1/docs/reference/allocation.md 9.0.0-1/docs/reference/allocation.md
--- 8.0.21-1/docs/reference/allocation.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/allocation.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/allocation.html
+navigation_title: Allocation
 ---
 
-# Allocation [allocation]
+# Using the allocation action in {{es}} Curator [allocation]
 
 ```yaml
 action: allocation
diff -pruN 8.0.21-1/docs/reference/close.md 9.0.0-1/docs/reference/close.md
--- 8.0.21-1/docs/reference/close.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/close.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/close.html
+navigation_title: Close
 ---
 
-# Close [close]
+# Curator close action for {{es}} indices [close]
 
 ```yaml
 action: close
diff -pruN 8.0.21-1/docs/reference/configfile.md 9.0.0-1/docs/reference/configfile.md
--- 8.0.21-1/docs/reference/configfile.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/configfile.md	2025-10-03 20:58:42.000000000 +0000
@@ -79,22 +79,22 @@ Curator can only work with one cluster a
 Flow:
 
 ```sh
-hosts: [ "http://10.0.0.1:9200", "http://10.0.0.2:9200" ]
+hosts: [ "<HOST_IP_1>:9200", "<HOST_IP_2>:9200" ]
 ```
 
 Spanning:
 
 ```sh
-hosts: [ "http://10.0.0.1:9200",
-    "http://10.0.0.2:9200" ]
+hosts: [ "<HOST_IP_1>:9200",
+    "<HOST_IP_2>:9200" ]
 ```
 
 Block:
 
 ```sh
 hosts:
-  - http://10.0.0.1:9200
-  - http://10.0.0.2:9200
+  - <HOST_IP_1>:9200
+  - <HOST_IP_2>:9200
 ```
 
 
diff -pruN 8.0.21-1/docs/reference/docker.md 9.0.0-1/docs/reference/docker.md
--- 8.0.21-1/docs/reference/docker.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/docker.md	2025-10-03 20:58:42.000000000 +0000
@@ -9,7 +9,7 @@ Curator is periodically published to Doc
 
 Download Curator Docker image:
 
-```
+``` sh subs=true
 docker pull untergeek/curator:{{curator_version}}
 ```
 
diff -pruN 8.0.21-1/docs/reference/ex_alias.md 9.0.0-1/docs/reference/ex_alias.md
--- 8.0.21-1/docs/reference/ex_alias.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_alias.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_alias.html
+navigation_title: alias
 ---
 
-# alias [ex_alias]
+# Alias action example in {{es}} Curator [ex_alias]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_allocation.md 9.0.0-1/docs/reference/ex_allocation.md
--- 8.0.21-1/docs/reference/ex_allocation.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_allocation.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_allocation.html
+navigation_title: allocation
 ---
 
-# allocation [ex_allocation]
+# {{es}} Curator allocation example [ex_allocation]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_close.md 9.0.0-1/docs/reference/ex_close.md
--- 8.0.21-1/docs/reference/ex_close.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_close.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_close.html
+navigation_title: close
 ---
 
-# close [ex_close]
+# Close indices using {{es}} Curator [ex_close]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_forcemerge.md 9.0.0-1/docs/reference/ex_forcemerge.md
--- 8.0.21-1/docs/reference/ex_forcemerge.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_forcemerge.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_forcemerge.html
+navigation_title: forcemerge
 ---
 
-# forcemerge [ex_forcemerge]
+# Forcemerge examples in Elastic Curator [ex_forcemerge]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_open.md 9.0.0-1/docs/reference/ex_open.md
--- 8.0.21-1/docs/reference/ex_open.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_open.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_open.html
+navigation_title: open
 ---
 
-# open [ex_open]
+# open action examples [ex_open]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_reindex.md 9.0.0-1/docs/reference/ex_reindex.md
--- 8.0.21-1/docs/reference/ex_reindex.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_reindex.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_reindex.html
+navigation_title: reindex
 ---
 
-# reindex [ex_reindex]
+# reindex action examples [ex_reindex]
 
 ## Manually selected reindex of a single index [_manually_selected_reindex_of_a_single_index]
 
@@ -114,7 +115,7 @@ actions:
       request_body:
         source:
           remote:
-            host: http://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
             username: myuser
             password: mypass
           index: index1
@@ -148,7 +149,7 @@ actions:
       request_body:
         source:
           remote:
-            host: http://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
             username: myuser
             password: mypass
           index: REINDEX_SELECTION
diff -pruN 8.0.21-1/docs/reference/ex_restore.md 9.0.0-1/docs/reference/ex_restore.md
--- 8.0.21-1/docs/reference/ex_restore.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_restore.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_restore.html
+navigation_title: restore
 ---
 
-# restore [ex_restore]
+# restore action examples [ex_restore]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_rollover.md 9.0.0-1/docs/reference/ex_rollover.md
--- 8.0.21-1/docs/reference/ex_rollover.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_rollover.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_rollover.html
+navigation_title: rollover
 ---
 
-# rollover [ex_rollover]
+# rollover action examples [ex_rollover]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_shrink.md 9.0.0-1/docs/reference/ex_shrink.md
--- 8.0.21-1/docs/reference/ex_shrink.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_shrink.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_shrink.html
+navigation_title: shrink
 ---
 
-# shrink [ex_shrink]
+# shrink action examples [ex_shrink]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/ex_snapshot.md 9.0.0-1/docs/reference/ex_snapshot.md
--- 8.0.21-1/docs/reference/ex_snapshot.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ex_snapshot.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_snapshot.html
+navigation_title: snapshot
 ---
 
-# snapshot [ex_snapshot]
+# snapshot action examples [ex_snapshot]
 
 ```yaml
 ---
diff -pruN 8.0.21-1/docs/reference/fe_allocation_type.md 9.0.0-1/docs/reference/fe_allocation_type.md
--- 8.0.21-1/docs/reference/fe_allocation_type.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/fe_allocation_type.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/fe_allocation_type.html
+navigation_title: allocation_type
 ---
 
-# allocation_type [fe_allocation_type]
+# {{es}} Curator allocation filter setting: allocation_type [fe_allocation_type]
 
 ::::{note}
 This setting is used only when using the [allocated](/reference/filtertype_allocated.md) filter.
diff -pruN 8.0.21-1/docs/reference/fe_count.md 9.0.0-1/docs/reference/fe_count.md
--- 8.0.21-1/docs/reference/fe_count.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/fe_count.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/fe_count.html
+navigation_title: count
 ---
 
-# count [fe_count]
+# Elasticsearch Curator count filter setting [fe_count]
 
 ::::{note}
 This setting is only used with the [count](/reference/filtertype_count.md) filtertype<br> and is a required setting.
diff -pruN 8.0.21-1/docs/reference/fe_max_num_segments.md 9.0.0-1/docs/reference/fe_max_num_segments.md
--- 8.0.21-1/docs/reference/fe_max_num_segments.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/fe_max_num_segments.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/fe_max_num_segments.html
+navigation_title: max_num_segments
 ---
 
-# max_num_segments [fe_max_num_segments]
+# max_num_segments filter element [fe_max_num_segments]
 
 ::::{note}
 This setting is only used with the [forcemerged](/reference/filtertype_forcemerged.md) filtertype.
diff -pruN 8.0.21-1/docs/reference/fe_pattern.md 9.0.0-1/docs/reference/fe_pattern.md
--- 8.0.21-1/docs/reference/fe_pattern.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/fe_pattern.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/fe_pattern.html
+navigation_title: pattern
 ---
 
-# pattern [fe_pattern]
+# pattern filter element [fe_pattern]
 
 ::::{note}
 This setting is only used with the [count](/reference/filtertype_count.md) filtertype
diff -pruN 8.0.21-1/docs/reference/fe_state.md 9.0.0-1/docs/reference/fe_state.md
--- 8.0.21-1/docs/reference/fe_state.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/fe_state.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,15 +1,15 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/fe_state.html
+navigation_title: state
 ---
 
-# state [fe_state]
+# state filter element [fe_state]
 
 ::::{note}
 This setting is only used with the [state](/reference/filtertype_state.md) filtertype.
 ::::
 
-
 ```yaml
 - filtertype: state
   state: SUCCESS
diff -pruN 8.0.21-1/docs/reference/fe_value.md 9.0.0-1/docs/reference/fe_value.md
--- 8.0.21-1/docs/reference/fe_value.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/fe_value.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/fe_value.html
+navigation_title: value
 ---
 
-# value [fe_value]
+# value filter element [fe_value]
 
 ::::{note}
 This setting is only used with the [pattern](/reference/filtertype_pattern.md) filtertype and is a required setting.  There is a separate [value option](/reference/option_value.md) associated with the [allocation action](/reference/allocation.md), and the [allocated filtertype](/reference/filtertype_allocated.md).
diff -pruN 8.0.21-1/docs/reference/filtertype_alias.md 9.0.0-1/docs/reference/filtertype_alias.md
--- 8.0.21-1/docs/reference/filtertype_alias.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/filtertype_alias.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/filtertype_alias.html
+navigation_title: alias
 ---
 
-# alias [filtertype_alias]
+# Using the alias filter in {{es}} Curator [filtertype_alias]
 
 ```yaml
 - filtertype: alias
diff -pruN 8.0.21-1/docs/reference/filtertype_count.md 9.0.0-1/docs/reference/filtertype_count.md
--- 8.0.21-1/docs/reference/filtertype_count.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/filtertype_count.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/filtertype_count.html
+navigation_title: count
 ---
 
-# count [filtertype_count]
+# {{es}} Curator count filtertype for sorting [filtertype_count]
 
 ```yaml
 - filtertype: count
@@ -13,7 +14,7 @@ mapped_pages:
 ::::{note}
 Empty values and commented lines will result in the default value, if any, being selected.  If a setting is set, but not used by a given [filtertype](/reference/filtertype.md), it may generate an error.
 ::::
-
+    
 
 This [filtertype](/reference/filtertype.md) will iterate over the actionable list of indices *or* snapshots. They are ordered by age, or by alphabet, so as to guarantee that the correct items will remain in, or be removed from the actionable list based on the values of [count](/reference/fe_count.md), [exclude](/reference/fe_exclude.md), and [reverse](/reference/fe_reverse.md).
 
diff -pruN 8.0.21-1/docs/reference/filtertype_pattern.md 9.0.0-1/docs/reference/filtertype_pattern.md
--- 8.0.21-1/docs/reference/filtertype_pattern.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/filtertype_pattern.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/filtertype_pattern.html
+navigation_title: pattern
 ---
 
-# pattern [filtertype_pattern]
+# pattern filter type [filtertype_pattern]
 
 ```yaml
 - filtertype: pattern
diff -pruN 8.0.21-1/docs/reference/filtertype_state.md 9.0.0-1/docs/reference/filtertype_state.md
--- 8.0.21-1/docs/reference/filtertype_state.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/filtertype_state.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/filtertype_state.html
+navigation_title: state
 ---
 
-# state [filtertype_state]
+# state filter type [filtertype_state]
 
 ```yaml
 - filtertype: state
diff -pruN 8.0.21-1/docs/reference/forcemerge.md 9.0.0-1/docs/reference/forcemerge.md
--- 8.0.21-1/docs/reference/forcemerge.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/forcemerge.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/forcemerge.html
+navigation_title: Forcemerge
 ---
 
-# Forcemerge [forcemerge]
+# Forcemerge action in Elastic Curator [forcemerge]
 
 ```yaml
 action: forcemerge
diff -pruN 8.0.21-1/docs/reference/ilm-actions.md 9.0.0-1/docs/reference/ilm-actions.md
--- 8.0.21-1/docs/reference/ilm-actions.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/ilm-actions.md	2025-10-03 20:58:42.000000000 +0000
@@ -14,12 +14,12 @@ ILM applies policy actions as indices en
 
 The policy actions include:
 
-* [Set Priority](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-set-priority.md)
-* [Rollover](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-rollover.md)
-* [Unfollow](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-unfollow.md)
-* [Allocate](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-allocate.md)
-* [Read-Only](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-readonly.md)
-* [Force Merge](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-forcemerge.md)
-* [Shrink](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-shrink.md)
-* [Delete](http://www.elastic.co/guide/en/elasticsearch/reference/8.15/ilm-delete.md)
+* [Set Priority](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-set-priority.md)
+* [Rollover](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-rollover.md)
+* [Unfollow](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-unfollow.md)
+* [Allocate](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-allocate.md)
+* [Read-Only](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-readonly.md)
+* [Force Merge](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-forcemerge.md)
+* [Shrink](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-shrink.md)
+* [Delete](elasticsearch://reference/elasticsearch/index-lifecycle-actions/ilm-delete.md)
 
diff -pruN 8.0.21-1/docs/reference/open.md 9.0.0-1/docs/reference/open.md
--- 8.0.21-1/docs/reference/open.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/open.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/open.html
+navigation_title: open
 ---
 
-# Open [open]
+# Open action [open]
 
 ```yaml
 action: open
diff -pruN 8.0.21-1/docs/reference/option_allocation_type.md 9.0.0-1/docs/reference/option_allocation_type.md
--- 8.0.21-1/docs/reference/option_allocation_type.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_allocation_type.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/option_allocation_type.html
+navigation_title: allocation_type
 ---
 
-# allocation_type [option_allocation_type]
+# {{es}} Curator allocation action option: allocation_type [option_allocation_type]
 
 ::::{note}
 This setting is used only when using the [allocation action](/reference/allocation.md)
diff -pruN 8.0.21-1/docs/reference/option_count.md 9.0.0-1/docs/reference/option_count.md
--- 8.0.21-1/docs/reference/option_count.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_count.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/option_count.html
+navigation_title: count
 ---
 
-# count [option_count]
+# Count option for replicas [option_count]
 
 ::::{note}
 This setting is required when using the [replicas action](/reference/replicas.md).
diff -pruN 8.0.21-1/docs/reference/option_mns.md 9.0.0-1/docs/reference/option_mns.md
--- 8.0.21-1/docs/reference/option_mns.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_mns.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/option_mns.html
+navigation_title: max_num_segments
 ---
 
-# max_num_segments [option_mns]
+# max_num_segments option [option_mns]
 
 ::::{note}
 This setting is required when using the [forceMerge action](/reference/forcemerge.md).
diff -pruN 8.0.21-1/docs/reference/option_remote_certificate.md 9.0.0-1/docs/reference/option_remote_certificate.md
--- 8.0.21-1/docs/reference/option_remote_certificate.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_remote_certificate.md	2025-10-03 20:58:42.000000000 +0000
@@ -21,7 +21,7 @@ actions:
       request_body:
         source:
           remote:
-            host: https://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
           index: index1
         dest:
           index: index2
diff -pruN 8.0.21-1/docs/reference/option_remote_client_cert.md 9.0.0-1/docs/reference/option_remote_client_cert.md
--- 8.0.21-1/docs/reference/option_remote_client_cert.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_remote_client_cert.md	2025-10-03 20:58:42.000000000 +0000
@@ -26,7 +26,7 @@ actions:
       request_body:
         source:
           remote:
-            host: https://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
           index: index1
         dest:
           index: index2
diff -pruN 8.0.21-1/docs/reference/option_remote_client_key.md 9.0.0-1/docs/reference/option_remote_client_key.md
--- 8.0.21-1/docs/reference/option_remote_client_key.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_remote_client_key.md	2025-10-03 20:58:42.000000000 +0000
@@ -26,7 +26,7 @@ actions:
       request_body:
         source:
           remote:
-            host: https://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
           index: index1
         dest:
           index: index2
diff -pruN 8.0.21-1/docs/reference/option_remote_filters.md 9.0.0-1/docs/reference/option_remote_filters.md
--- 8.0.21-1/docs/reference/option_remote_filters.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_remote_filters.md	2025-10-03 20:58:42.000000000 +0000
@@ -23,7 +23,7 @@ actions:
       request_body:
         source:
           remote:
-            host: https://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
           index: REINDEX_SELECTION
         dest:
           index: index2
diff -pruN 8.0.21-1/docs/reference/option_remote_url_prefix.md 9.0.0-1/docs/reference/option_remote_url_prefix.md
--- 8.0.21-1/docs/reference/option_remote_url_prefix.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_remote_url_prefix.md	2025-10-03 20:58:42.000000000 +0000
@@ -24,7 +24,7 @@ actions:
       request_body:
         source:
           remote:
-            host: https://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
           index: index1
         dest:
           index: index2
diff -pruN 8.0.21-1/docs/reference/option_request_body.md 9.0.0-1/docs/reference/option_request_body.md
--- 8.0.21-1/docs/reference/option_request_body.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_request_body.md	2025-10-03 20:58:42.000000000 +0000
@@ -96,7 +96,7 @@ actions:
       request_body:
         source:
           remote:
-            host: http://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
             username: myuser
             password: mypass
           index: index1
@@ -152,7 +152,7 @@ actions:
       request_body:
         source:
           remote:
-            host: http://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
             username: myuser
             password: mypass
           index: REINDEX_SELECTION
@@ -224,7 +224,7 @@ actions:
       request_body:
         source:
           remote:
-            host: http://otherhost:9200
+            host: <OTHER_HOST_URL>:9200
             username: myuser
             password: mypass
           index: REINDEX_SELECTION
diff -pruN 8.0.21-1/docs/reference/option_value.md 9.0.0-1/docs/reference/option_value.md
--- 8.0.21-1/docs/reference/option_value.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/option_value.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/option_value.html
+navigation_title: value
 ---
 
-# value [option_value]
+# value option [option_value]
 
 ::::{note}
 This setting is optional when using the [allocation action](/reference/allocation.md) and required when using the [cluster_routing action](/reference/cluster_routing.md).
diff -pruN 8.0.21-1/docs/reference/pip.md 9.0.0-1/docs/reference/pip.md
--- 8.0.21-1/docs/reference/pip.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/pip.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,8 +1,6 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/pip.html
-sub:
-  curator_v: 8.0.18
 ---
 
 # pip [pip]
@@ -32,8 +30,8 @@ pip install -U elasticsearch-curator==X.
 
 For example:
 
-```
-pip install -U elasticsearch-curator=={{curator_v}}
+```sh subs=true
+pip install -U elasticsearch-curator=={{curator_version}}
 ```
 
 ## System-wide vs. User-only installation [_system_wide_vs_user_only_installation]
diff -pruN 8.0.21-1/docs/reference/python-source.md 9.0.0-1/docs/reference/python-source.md
--- 8.0.21-1/docs/reference/python-source.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/python-source.md	2025-10-03 20:58:42.000000000 +0000
@@ -5,12 +5,17 @@ mapped_pages:
 
 # Installation from source [python-source]
 
-Installing or Curator from source tarball (rather than doing a `git clone`) is also possible.
+You can install Curator from a source tarball (rather than doing a `git clone`).
 
-Download and install Curator from tarball:
 
-1. `wget https://github.com/elastic/curator/archive/v``8.0.17.tar.gz -O elasticsearch-curator.tar.gz`
-2. `pip install elasticsearch-curator.tar.gz`
+1. Download the Curator tarball:
+```sh subs=true
+wget https://github.com/elastic/curator/archive/v{{curator_version}}.tar.gz -O elasticsearch-curator.tar.gz
+```
+2. Install the tarball:
+```sh
+pip install elasticsearch-curator.tar.gz
+```
 
- 
+ 
 
diff -pruN 8.0.21-1/docs/reference/reindex.md 9.0.0-1/docs/reference/reindex.md
--- 8.0.21-1/docs/reference/reindex.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/reindex.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/reindex.html
+navigation_title: Reindex
 ---
 
-# Reindex [reindex]
+# Reindex action [reindex]
 
 ```yaml
 actions:
@@ -28,7 +29,6 @@ There are many options for the reindex o
 
 * [request_body](/reference/option_request_body.md)
 
-
 ## Optional settings [_optional_settings_12]
 
 * [refresh](/reference/option_refresh.md)
@@ -56,10 +56,6 @@ There are many options for the reindex o
 See an example of this action in an [actionfile](/reference/actionfile.md) [here](/reference/ex_reindex.md).
 ::::
 
-
-
 ## Compatibility [_compatibility]
 
 Generally speaking, the Curator should be able to perform a remote reindex from any version of Elasticsearch, 1.4 and newer. Strictly speaking, the Reindex API in Elasticsearch *is* able to reindex from older clusters, but Curator cannot be used to facilitate this due to Curator’s dependency on changes released in 1.4.
-
-
diff -pruN 8.0.21-1/docs/reference/restore.md 9.0.0-1/docs/reference/restore.md
--- 8.0.21-1/docs/reference/restore.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/restore.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/restore.html
+navigation_title: Restore
 ---
 
-# Restore [restore]
+# Restore action [restore]
 
 ```yaml
 actions:
diff -pruN 8.0.21-1/docs/reference/rollover.md 9.0.0-1/docs/reference/rollover.md
--- 8.0.21-1/docs/reference/rollover.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/rollover.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/rollover.html
+navigation_title: Rollover
 ---
 
-# Rollover [rollover]
+# Rollover action [rollover]
 
 ```yaml
 action: rollover
diff -pruN 8.0.21-1/docs/reference/shrink.md 9.0.0-1/docs/reference/shrink.md
--- 8.0.21-1/docs/reference/shrink.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/shrink.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/shrink.html
+navigation_title: Shrink
 ---
 
-# Shrink [shrink]
+# Shrink action [shrink]
 
 ```yaml
 action: shrink
diff -pruN 8.0.21-1/docs/reference/snapshot.md 9.0.0-1/docs/reference/snapshot.md
--- 8.0.21-1/docs/reference/snapshot.md	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/docs/reference/snapshot.md	2025-10-03 20:58:42.000000000 +0000
@@ -1,9 +1,10 @@
 ---
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/client/curator/current/snapshot.html
+navigation_title: Snapshot
 ---
 
-# Snapshot [snapshot]
+# Snapshot action [snapshot]
 
 ```yaml
 action: snapshot
diff -pruN 8.0.21-1/pyproject.toml 9.0.0-1/pyproject.toml
--- 8.0.21-1/pyproject.toml	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/pyproject.toml	2025-10-03 20:58:42.000000000 +0000
@@ -9,18 +9,17 @@ dynamic = ["version"]
 description = "Tending your Elasticsearch indices and snapshots"
 license = {file = "LICENSE"}
 readme = "README.rst"
-requires-python = ">=3.8"
+requires-python = ">=3.10"
 classifiers = [
     "Intended Audience :: Developers",
     "Intended Audience :: System Administrators",
     "License :: OSI Approved :: Apache Software License",
     "Operating System :: OS Independent",
     "Programming Language :: Python",
-    "Programming Language :: Python :: 3.8",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
 ]
 keywords = [
     'elasticsearch',
@@ -29,7 +28,7 @@ keywords = [
     'index-expiry'
 ]
 dependencies = [
-    "es_client==8.17.5"
+    "es_client==9.0.1"
 ]
 
 [project.optional-dependencies]
@@ -105,7 +104,7 @@ all = ["python", "templates"]
 max-line-length = "88"
 
 [tool.black]
-target-version = ['py38']
+target-version = ['py310']
 line-length = 88
 skip-string-normalization = true
 include = '\.pyi?$'
@@ -116,11 +115,11 @@ platforms = ["linux", "macos"]
 dependencies = [
     "requests",
     "pytest >=7.2.1",
-    "pytest-cov"
+    "pytest-cov",
 ]
 
 [[tool.hatch.envs.test.matrix]]
-python = ["3.8", "3.9", "3.10", "3.11", "3.12"]
+python = ["3.10", "3.11", "3.12", "3.13"]
 
 [tool.hatch.envs.test.scripts]
 pytest = "source docker_test/.env; pytest"
diff -pruN 8.0.21-1/tests/integration/__init__.py 9.0.0-1/tests/integration/__init__.py
--- 8.0.21-1/tests/integration/__init__.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/integration/__init__.py	2025-10-03 20:58:42.000000000 +0000
@@ -14,11 +14,11 @@ import warnings
 from datetime import timedelta, datetime, date, timezone
 from subprocess import Popen, PIPE
 from unittest import SkipTest, TestCase
-from elasticsearch8 import Elasticsearch
-from elasticsearch8.exceptions import ConnectionError as ESConnectionError
-from elasticsearch8.exceptions import ElasticsearchWarning, NotFoundError
+from elasticsearch9 import Elasticsearch
+from elasticsearch9.exceptions import ConnectionError as ESConnectionError
+from elasticsearch9.exceptions import ElasticsearchWarning, NotFoundError
 from click import testing as clicktest
-from es_client.helpers.utils import get_version
+from es_client.utils import get_version
 from curator.cli import cli
 
 from . import testvars
diff -pruN 8.0.21-1/tests/integration/test_alias.py 9.0.0-1/tests/integration/test_alias.py
--- 8.0.21-1/tests/integration/test_alias.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/integration/test_alias.py	2025-10-03 20:58:42.000000000 +0000
@@ -5,11 +5,11 @@ import os
 import logging
 from datetime import datetime, timedelta, timezone
 import pytest
-from elasticsearch8.exceptions import NotFoundError
+from elasticsearch9.exceptions import NotFoundError
 from . import CuratorTestCase
 from . import testvars
 
-LOGGER = logging.getLogger('test_alias')
+LOGGER = logging.getLogger(__name__)
 
 HOST = os.environ.get('TEST_ES_SERVER', 'http://127.0.0.1:9200')
 
@@ -41,7 +41,8 @@ class TestActionFileAlias(CuratorTestCas
                 'aliases': {'testalias': {'filter': {'term': {'user': 'kimchy'}}}}
             }
         }
-        assert expected == self.client.indices.get_alias(name=alias)
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_alias_remove_only(self):
         alias = 'testalias'
@@ -55,7 +56,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx2, name=alias)
         self.invoke_runner()
         expected = {idx2: {'aliases': {}}, idx1: {'aliases': {}}}
-        assert expected == self.client.indices.get_alias(index=f'{idx1},{idx2}')
+        result = self.client.indices.get_alias(index=f'{idx1},{idx2}')
+        assert expected == result.body
 
     def test_add_only_skip_closed(self):
         alias = 'testalias'
@@ -83,7 +85,17 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx1, name=alias)
         self.invoke_runner()
         expected = {idx2: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        success = False
+        result = self.client.indices.get_alias(name=alias)
+        tries = 0
+        max_tries = 10
+        while not success and tries < max_tries:
+            if expected == result.body:
+                success = True
+            else:
+                tries += 1
+                result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_add_and_remove_datemath(self):
         alias = '<testalias-{now-1d/d}>'
@@ -99,7 +111,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx1, name=alias_parsed)
         self.invoke_runner()
         expected = {idx2: {'aliases': {alias_parsed: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias_parsed)
+        result = self.client.indices.get_alias(name=alias_parsed)
+        assert expected == result.body
 
     def test_add_with_empty_remove(self):
         alias = 'testalias'
@@ -113,7 +126,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx1, name=alias)
         self.invoke_runner()
         expected = {idx1: {'aliases': {alias: {}}}, idx2: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_remove_with_empty_add(self):
         alias = 'testalias'
@@ -127,7 +141,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=f'{idx1},{idx2}', name=alias)
         self.invoke_runner()
         expected = {idx2: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_add_with_empty_list(self):
         alias = 'testalias'
@@ -142,7 +157,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx1, name=alias)
         self.invoke_runner()
         expected = {idx1: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_remove_with_empty_list(self):
         alias = 'testalias'
@@ -157,7 +173,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx1, name=alias)
         self.invoke_runner()
         expected = {idx1: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_remove_index_not_in_alias(self):
         alias = 'testalias'
@@ -235,7 +252,8 @@ class TestActionFileAlias(CuratorTestCas
         self.client.indices.put_alias(index=idx2, name=alias)
         self.invoke_runner()
         expected = {idx1: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
 
 class TestCLIAlias(CuratorTestCase):
@@ -262,7 +280,17 @@ class TestCLIAlias(CuratorTestCase):
         ]
         assert 0 == self.run_subprocess(args)
         expected = {idx1: {'aliases': {alias: {}}}}
-        assert expected == self.client.indices.get_alias(name=alias)
+        success = False
+        result = self.client.indices.get_alias(name=alias)
+        tries = 0
+        max_tries = 10
+        while not success and tries < max_tries:
+            if expected == result.body:
+                success = True
+            else:
+                tries += 1
+                result = self.client.indices.get_alias(name=alias)
+        assert success
 
     def test_warn_if_no_indices(self):
         """test_warn_if_no_indices"""
@@ -287,7 +315,8 @@ class TestCLIAlias(CuratorTestCase):
         LOGGER.debug('ARGS = %s', args)
         assert 0 == self.run_subprocess(args)
         expected = {idx1: {'aliases': {alias: {}}}, idx2: {'aliases': {alias: {}}}}
-        assert expected == dict(self.client.indices.get_alias(name=alias))
+        result = self.client.indices.get_alias(name=alias)
+        assert expected == result.body
 
     def test_exit_1_on_empty_list(self):
         """test_exit_1_on_empty_list"""
diff -pruN 8.0.21-1/tests/integration/test_create_index.py 9.0.0-1/tests/integration/test_create_index.py
--- 8.0.21-1/tests/integration/test_create_index.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/integration/test_create_index.py	2025-10-03 20:58:42.000000000 +0000
@@ -128,5 +128,5 @@ class TestCLICreateIndex(CuratorTestCase
         self.write_config(self.args['actionfile'], config.format(idx))
         self.invoke_runner()
         assert not get_indices(self.client)
-        assert 'mapper_parsing_exception' in self.result.stdout
+        assert 'mapper_parsing_exception' in self.result.stderr_bytes.decode('utf-8')
         assert 1 == self.result.exit_code
diff -pruN 8.0.21-1/tests/integration/test_es_repo_mgr.py 9.0.0-1/tests/integration/test_es_repo_mgr.py
--- 8.0.21-1/tests/integration/test_es_repo_mgr.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/integration/test_es_repo_mgr.py	2025-10-03 20:58:42.000000000 +0000
@@ -202,4 +202,7 @@ class TestCLIShowRepositories(CuratorTes
         )
         # The splitlines()[-1] allows me to only capture the last line and ignore
         # other output
-        assert self.args['repository'] == result.output.splitlines()[-1]
+        assert (
+            self.args['repository']
+            == result.stdout_bytes.decode('utf-8').splitlines()[-1]
+        )
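
The repository-manager test now decodes the runner's captured byte stream instead of reading result.output. A short sketch of that pattern using Curator's own CLI entry point; the '--help' invocation is only a placeholder, since the real test runs against a live cluster.

    from click.testing import CliRunner
    from curator.cli import cli

    # Decode stdout_bytes explicitly rather than relying on result.output,
    # then inspect only the last line of output.
    runner = CliRunner()
    result = runner.invoke(cli, ['--help'])
    last_line = result.stdout_bytes.decode('utf-8').splitlines()[-1]
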
diff -pruN 8.0.21-1/tests/integration/test_integrations.py 9.0.0-1/tests/integration/test_integrations.py
--- 8.0.21-1/tests/integration/test_integrations.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/integration/test_integrations.py	2025-10-03 20:58:42.000000000 +0000
@@ -4,8 +4,8 @@
 import os
 import warnings
 import pytest
-from elasticsearch8 import Elasticsearch
-from elasticsearch8.exceptions import ElasticsearchWarning, NotFoundError
+from elasticsearch9 import Elasticsearch
+from elasticsearch9.exceptions import ElasticsearchWarning, NotFoundError
 from curator.exceptions import ConfigurationError
 from curator.helpers.getters import get_indices
 from curator import IndexList
diff -pruN 8.0.21-1/tests/integration/test_open.py 9.0.0-1/tests/integration/test_open.py
--- 8.0.21-1/tests/integration/test_open.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/integration/test_open.py	2025-10-03 20:58:42.000000000 +0000
@@ -3,7 +3,7 @@
 # pylint: disable=C0115, C0116, invalid-name
 import os
 import warnings
-from elasticsearch8.exceptions import ElasticsearchWarning
+from elasticsearch9.exceptions import ElasticsearchWarning
 from . import CuratorTestCase
 from . import testvars
 
@@ -27,9 +27,14 @@ class TestActionFileOpenClosed(CuratorTe
         warnings.filterwarnings("ignore", category=ElasticsearchWarning)
         self.client.indices.close(index=idx2, ignore_unavailable=True)
         self.invoke_runner()
+        check = {idx1: False, idx2: False}
+        success = False
         csi = self.client.cluster.state(metric=MET)[MET]['indices']
         for idx in (idx1, idx2):
-            assert 'close' != csi[idx]['state']
+            if 'close' != csi[idx]['state']:
+                check[idx] = True
+        success = all(check.values())
+        assert success
 
     def test_extra_option(self):
         self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
diff -pruN 8.0.21-1/tests/unit/test_helpers_date_ops.py 9.0.0-1/tests/unit/test_helpers_date_ops.py
--- 8.0.21-1/tests/unit/test_helpers_date_ops.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/unit/test_helpers_date_ops.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,21 +1,30 @@
 """test_helpers_date_ops"""
+
 from datetime import datetime
 from unittest import TestCase
 from unittest.mock import Mock
 import pytest
-from elasticsearch8 import NotFoundError
+from elasticsearch9 import NotFoundError
 from elastic_transport import ApiResponseMeta
 from curator.exceptions import ConfigurationError
 from curator.helpers.date_ops import (
-    absolute_date_range, date_range, datetime_to_epoch, fix_epoch, get_date_regex, get_datemath,
-    get_point_of_reference, isdatemath
+    absolute_date_range,
+    date_range,
+    datetime_to_epoch,
+    fix_epoch,
+    get_date_regex,
+    get_datemath,
+    get_point_of_reference,
+    isdatemath,
 )
 
+
 class TestGetDateRegex(TestCase):
     """TestGetDateRegex
 
     Test helpers.date_ops.get_date_regex functionality.
     """
+
     def test_non_escaped(self):
         """test_non_escaped
 
@@ -23,11 +32,13 @@ class TestGetDateRegex(TestCase):
         """
         assert '\\d{4}\\-\\d{2}\\-\\d{2}t\\d{2}' == get_date_regex('%Y-%m-%dt%H')
 
+
 class TestFixEpoch(TestCase):
     """TestFixEpoch
 
     Test helpers.date_ops.fix_epoch functionality.
     """
+
     def test_fix_epoch(self):
         """test_fix_epoch
 
@@ -42,8 +53,9 @@ class TestFixEpoch(TestCase):
             (145928763600000000, 1459287636),
             (145928763600000001, 1459287636),
             (1459287636123456789, 1459287636),
-                ]:
+        ]:
             assert epoch == fix_epoch(long_epoch)
+
     def test_fix_epoch_raise(self):
         """test_fix_epoch_raise
 
@@ -52,11 +64,13 @@ class TestFixEpoch(TestCase):
         with pytest.raises(ValueError):
             fix_epoch(None)
 
+
 class TestGetPointOfReference(TestCase):
     """TestGetPointOfReference
 
     Test helpers.date_ops.get_point_of_reference functionality.
     """
+
     def test_get_point_of_reference(self):
         """test_get_point_of_reference
 
@@ -64,16 +78,17 @@ class TestGetPointOfReference(TestCase):
         """
         epoch = 1459288037
         for unit, result in [
-            ('seconds', epoch-1),
-            ('minutes', epoch-60),
-            ('hours', epoch-3600),
-            ('days', epoch-86400),
-            ('weeks', epoch-(86400*7)),
-            ('months', epoch-(86400*30)),
-            ('years', epoch-(86400*365)),
-                ]:
+            ('seconds', epoch - 1),
+            ('minutes', epoch - 60),
+            ('hours', epoch - 3600),
+            ('days', epoch - 86400),
+            ('weeks', epoch - (86400 * 7)),
+            ('months', epoch - (86400 * 30)),
+            ('years', epoch - (86400 * 365)),
+        ]:
             # self.assertEqual(result, get_point_of_reference(unit, 1, epoch))
             assert result == get_point_of_reference(unit, 1, epoch)
+
     def test_get_por_raise(self):
         """test_get_por_raise
 
@@ -83,12 +98,15 @@ class TestGetPointOfReference(TestCase):
         with pytest.raises(ValueError):
             get_point_of_reference('invalid', 1)
 
+
 class TestDateRange(TestCase):
     """TestDateRange
 
     Test helpers.date_ops.date_range functionality.
     """
-    EPOCH = datetime_to_epoch(datetime(2017,  4,  3, 22, 50, 17))
+
+    EPOCH = datetime_to_epoch(datetime(2017, 4, 3, 22, 50, 17))
+
     def test_bad_unit(self):
         """test_bad_unit
 
@@ -96,13 +114,17 @@ class TestDateRange(TestCase):
         """
         with pytest.raises(ConfigurationError, match=r'"unit" must be one of'):
             date_range('invalid', 1, 1)
+
     def test_bad_range(self):
         """test_bad_range
 
         Should raise a ``ConfigurationError`` exception when an improper range value is passed
         """
-        with pytest.raises(ConfigurationError, match=r'must be greater than or equal to'):
+        with pytest.raises(
+            ConfigurationError, match=r'must be greater than or equal to'
+        ):
             date_range('hours', 1, -1)
+
     def test_hours_single(self):
         """test_hours_single
 
@@ -111,9 +133,10 @@ class TestDateRange(TestCase):
         unit = 'hours'
         range_from = -1
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  4,  3, 21,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  3, 21, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 3, 21, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 3, 21, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_hours_past_range(self):
         """test_hours_past_range
 
@@ -122,9 +145,10 @@ class TestDateRange(TestCase):
         unit = 'hours'
         range_from = -3
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  4,  3, 19,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  3, 21, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 3, 19, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 3, 21, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_hours_future_range(self):
         """test_hours_future_range
 
@@ -133,9 +157,10 @@ class TestDateRange(TestCase):
         unit = 'hours'
         range_from = 0
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  4,  3, 22,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  4, 00, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 3, 22, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 4, 00, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_hours_span_range(self):
         """test_hours_span_range
 
@@ -144,9 +169,10 @@ class TestDateRange(TestCase):
         unit = 'hours'
         range_from = -1
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  4,  3, 21,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  4, 00, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 3, 21, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 4, 00, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_days_single(self):
         """test_days_single
 
@@ -155,9 +181,10 @@ class TestDateRange(TestCase):
         unit = 'days'
         range_from = -1
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  4,  2,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  2, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 2, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 2, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_days_past_range(self):
         """test_days_range
 
@@ -166,9 +193,10 @@ class TestDateRange(TestCase):
         unit = 'days'
         range_from = -3
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  3, 31,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  2, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 31, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 2, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_days_future_range(self):
         """test_days_future_range
 
@@ -177,9 +205,10 @@ class TestDateRange(TestCase):
         unit = 'days'
         range_from = 0
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  4,  3,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  5, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 3, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 5, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_days_span_range(self):
         """test_days_span_range
 
@@ -188,9 +217,10 @@ class TestDateRange(TestCase):
         unit = 'days'
         range_from = -1
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  4,  2,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  5, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 2, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 5, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_weeks_single(self):
         """test_weeks_single
 
@@ -199,9 +229,10 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = -1
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  3, 26,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  1, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 26, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 1, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_weeks_past_range(self):
         """test_weeks_past_range
 
@@ -210,9 +241,10 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = -3
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  3, 12,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  1, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 12, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 1, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_weeks_future_range(self):
         """test_weeks_future_range
 
@@ -221,9 +253,10 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = 0
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  4,  2, 00,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4, 22, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 2, 00, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 22, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_weeks_span_range(self):
         """test_weeks_span_range
 
@@ -232,9 +265,10 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = -1
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  3, 26,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4, 22, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 26, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 22, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_weeks_single_iso(self):
         """test_weeks_single_iso
 
@@ -244,10 +278,13 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = -1
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  3, 27,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  2, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 27, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 2, 23, 59, 59))
         # pylint: disable=line-too-long
-        assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday')
+        assert (start, end) == date_range(
+            unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday'
+        )
+
     def test_weeks_past_range_iso(self):
         """test_weeks_past_range_iso
 
@@ -257,10 +294,13 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = -3
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  3, 13,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4,  2, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 13, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 2, 23, 59, 59))
         # pylint: disable=line-too-long
-        assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday')
+        assert (start, end) == date_range(
+            unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday'
+        )
+
     def test_weeks_future_range_iso(self):
         """test_weeks_future_range_iso
 
@@ -270,10 +310,13 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = 0
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  4,  3,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4, 23, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 4, 3, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 23, 23, 59, 59))
         # pylint: disable=line-too-long
-        assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday')
+        assert (start, end) == date_range(
+            unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday'
+        )
+
     def test_weeks_span_range_iso(self):
         """test_weeks_span_range_iso
 
@@ -283,10 +326,13 @@ class TestDateRange(TestCase):
         unit = 'weeks'
         range_from = -1
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  3, 27,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  4, 23, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 27, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 4, 23, 23, 59, 59))
         # pylint: disable=line-too-long
-        assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday')
+        assert (start, end) == date_range(
+            unit, range_from, range_to, epoch=self.EPOCH, week_starts_on='monday'
+        )
+
     def test_months_single(self):
         """test_months_single
 
@@ -295,9 +341,10 @@ class TestDateRange(TestCase):
         unit = 'months'
         range_from = -1
         range_to = -1
-        start = datetime_to_epoch(datetime(2017,  3,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  3, 31, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 3, 31, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_months_past_range(self):
         """test_months_past_range
 
@@ -306,9 +353,10 @@ class TestDateRange(TestCase):
         unit = 'months'
         range_from = -4
         range_to = -1
-        start = datetime_to_epoch(datetime(2016, 12,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  3, 31, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2016, 12, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 3, 31, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_months_future_range(self):
         """test_months_future_range
 
@@ -317,9 +365,10 @@ class TestDateRange(TestCase):
         unit = 'months'
         range_from = 7
         range_to = 10
-        start = datetime_to_epoch(datetime(2017, 11,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2018,  2, 28, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 11, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2018, 2, 28, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_months_super_future_range(self):
         """test_months_super_future_range
 
@@ -328,9 +377,10 @@ class TestDateRange(TestCase):
         unit = 'months'
         range_from = 9
         range_to = 10
-        start = datetime_to_epoch(datetime(2018,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2018,  2, 28, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2018, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2018, 2, 28, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_months_span_range(self):
         """test_months_span_range
 
@@ -340,9 +390,10 @@ class TestDateRange(TestCase):
         range_from = -1
         range_to = 2
 
-        start = datetime_to_epoch(datetime(2017,  3,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  6, 30, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 3, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 6, 30, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_years_single(self):
         """test_years_single
 
@@ -351,9 +402,10 @@ class TestDateRange(TestCase):
         unit = 'years'
         range_from = -1
         range_to = -1
-        start = datetime_to_epoch(datetime(2016,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2016, 12, 31, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2016, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2016, 12, 31, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_years_past_range(self):
         """test_years_past_range
 
@@ -362,9 +414,10 @@ class TestDateRange(TestCase):
         unit = 'years'
         range_from = -3
         range_to = -1
-        start = datetime_to_epoch(datetime(2014,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2016, 12, 31, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2014, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2016, 12, 31, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_years_future_range(self):
         """test_years_future_range
 
@@ -373,9 +426,10 @@ class TestDateRange(TestCase):
         unit = 'years'
         range_from = 0
         range_to = 2
-        start = datetime_to_epoch(datetime(2017,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2019, 12, 31, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2017, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2019, 12, 31, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
+
     def test_years_span_range(self):
         """test_years_span_range
 
@@ -384,15 +438,17 @@ class TestDateRange(TestCase):
         unit = 'years'
         range_from = -1
         range_to = 2
-        start = datetime_to_epoch(datetime(2016,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2019, 12, 31, 23, 59, 59))
+        start = datetime_to_epoch(datetime(2016, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2019, 12, 31, 23, 59, 59))
         assert (start, end) == date_range(unit, range_from, range_to, epoch=self.EPOCH)
 
+
 class TestAbsoluteDateRange(TestCase):
     """TestAbsoluteDateRange
 
     Test helpers.date_ops.absolute_date_range functionality.
     """
+
     def test_bad_unit(self):
         """test_bad_unit
 
@@ -404,7 +460,10 @@ class TestAbsoluteDateRange(TestCase):
         date_to = '2017.01'
         date_to_format = '%Y.%m'
         with pytest.raises(ConfigurationError, match=r'"unit" must be one of'):
-            absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+            absolute_date_range(
+                unit, date_from, date_to, date_from_format, date_to_format
+            )
+
     def test_bad_formats(self):
         """test_bad_formats
 
@@ -412,10 +471,17 @@ class TestAbsoluteDateRange(TestCase):
         ``date_to_format`` is passed.
         """
         unit = 'days'
-        with pytest.raises(ConfigurationError, match=r'Must provide "date_from_format" and "date_to_format"'):
+        with pytest.raises(
+            ConfigurationError,
+            match=r'Must provide "date_from_format" and "date_to_format"',
+        ):
             absolute_date_range(unit, 'meh', 'meh', None, 'meh')
-        with pytest.raises(ConfigurationError, match=r'Must provide "date_from_format" and "date_to_format"'):
+        with pytest.raises(
+            ConfigurationError,
+            match=r'Must provide "date_from_format" and "date_to_format"',
+        ):
             absolute_date_range(unit, 'meh', 'meh', 'meh', None)
+
     def test_bad_dates(self):
         """test_bad_dates
 
@@ -426,9 +492,14 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y.%m'
         date_to_format = '%Y.%m'
         with pytest.raises(ConfigurationError, match=r'Unable to parse "date_from"'):
-            absolute_date_range(unit, 'meh', '2017.01', date_from_format, date_to_format)
+            absolute_date_range(
+                unit, 'meh', '2017.01', date_from_format, date_to_format
+            )
         with pytest.raises(ConfigurationError, match=r'Unable to parse "date_to"'):
-            absolute_date_range(unit, '2017.01', 'meh', date_from_format, date_to_format)
+            absolute_date_range(
+                unit, '2017.01', 'meh', date_from_format, date_to_format
+            )
+
     def test_single_month(self):
         """test_single_month
 
@@ -439,10 +510,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y.%m'
         date_to = '2017.01'
         date_to_format = '%Y.%m'
-        start = datetime_to_epoch(datetime(2017,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  1, 31, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2017, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 1, 31, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_multiple_month(self):
         """test_multiple_month
 
@@ -453,10 +527,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y.%m'
         date_to = '2016.12'
         date_to_format = '%Y.%m'
-        start = datetime_to_epoch(datetime(2016, 11,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2016, 12, 31, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2016, 11, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2016, 12, 31, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_single_year(self):
         """test_single_year
 
@@ -467,10 +544,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y'
         date_to = '2017'
         date_to_format = '%Y'
-        start = datetime_to_epoch(datetime(2017,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017, 12, 31, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2017, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 12, 31, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_multiple_year(self):
         """test_multiple_year
 
@@ -481,10 +561,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y'
         date_to = '2017'
         date_to_format = '%Y'
-        start = datetime_to_epoch(datetime(2016,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017, 12, 31, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2016, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 12, 31, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_single_week_uw(self):
         """test_single_week_UW
 
@@ -495,10 +578,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y-%U'
         date_to = '2017-01'
         date_to_format = '%Y-%U'
-        start = datetime_to_epoch(datetime(2017,  1,  2,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  1,  8, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2017, 1, 2, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 1, 8, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_multiple_weeks_uw(self):
         """test_multiple_weeks_UW
 
@@ -509,10 +595,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y-%U'
         date_to = '2017-04'
         date_to_format = '%Y-%U'
-        start = datetime_to_epoch(datetime(2017,  1,   2,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  1,  29, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2017, 1, 2, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 1, 29, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_single_week_iso(self):
         """test_single_week_ISO
 
@@ -523,10 +612,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%G-%V'
         date_to = '2014-01'
         date_to_format = '%G-%V'
-        start = datetime_to_epoch(datetime(2013, 12, 30,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2014,  1,  5, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2013, 12, 30, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2014, 1, 5, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_multiple_weeks_iso(self):
         """test_multiple_weeks_ISO
 
@@ -537,10 +629,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%G-%V'
         date_to = '2014-04'
         date_to_format = '%G-%V'
-        start = datetime_to_epoch(datetime(2013, 12, 30,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2014,  1, 26, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2013, 12, 30, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2014, 1, 26, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_single_day(self):
         """test_single_day
 
@@ -551,10 +646,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y.%m.%d'
         date_to = '2017.01.01'
         date_to_format = '%Y.%m.%d'
-        start = datetime_to_epoch(datetime(2017,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  1,  1, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2017, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 1, 1, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_multiple_days(self):
         """test_multiple_days
 
@@ -565,10 +663,13 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y.%m.%d'
         date_to = '2017.01.01'
         date_to_format = '%Y.%m.%d'
-        start = datetime_to_epoch(datetime(2016, 12, 31,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  1,  1, 23, 59, 59))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2016, 12, 31, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 1, 1, 23, 59, 59))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
+
     def test_iso8601(self):
         """test_ISO8601
 
@@ -579,16 +680,20 @@ class TestAbsoluteDateRange(TestCase):
         date_from_format = '%Y-%m-%dT%H:%M:%S'
         date_to = '2017-01-01T12:34:56'
         date_to_format = '%Y-%m-%dT%H:%M:%S'
-        start = datetime_to_epoch(datetime(2017,  1,  1,  0,  0,  0))
-        end   = datetime_to_epoch(datetime(2017,  1,  1, 12, 34, 56))
-        result = absolute_date_range(unit, date_from, date_to, date_from_format, date_to_format)
+        start = datetime_to_epoch(datetime(2017, 1, 1, 0, 0, 0))
+        end = datetime_to_epoch(datetime(2017, 1, 1, 12, 34, 56))
+        result = absolute_date_range(
+            unit, date_from, date_to, date_from_format, date_to_format
+        )
         assert (start, end) == result
 
+
 class TestIsDateMath(TestCase):
     """TestIsDateMath
 
     Test helpers.date_ops.isdatemath functionality.
     """
+
     def test_positive(self):
         """test_positive
 
@@ -596,6 +701,7 @@ class TestIsDateMath(TestCase):
         """
         data = '<encapsulated>'
         assert isdatemath(data)
+
     def test_negative(self):
         """test_negative
 
@@ -603,20 +709,25 @@ class TestIsDateMath(TestCase):
         """
         data = 'not_encapsulated'
         assert not isdatemath(data)
+
     def test_raises(self):
         """test_raises
 
         Should raise ConfigurationError exception when malformed data is passed
         """
         data = '<badly_encapsulated'
-        with pytest.raises(ConfigurationError, match=r'Incomplete datemath encapsulation'):
+        with pytest.raises(
+            ConfigurationError, match=r'Incomplete datemath encapsulation'
+        ):
             isdatemath(data)
 
+
 class TestGetDateMath(TestCase):
     """TestGetDateMath
 
     Test helpers.date_ops.get_datemath functionality.
     """
+
     def test_success(self):
         """test_success
 
@@ -628,12 +739,13 @@ class TestGetDateMath(TestCase):
         expected = 'curator_get_datemath_function_' + psuedo_random + '-hasthemath'
         # 5 positional args for meta: status, http_version, headers, duration, node
         meta = ApiResponseMeta(404, '1.1', {}, 0.01, None)
-        body = {'error':{'index':expected}}
+        body = {'error': {'index': expected}}
         msg = 'index_not_found_exception'
         # 3 positional args for NotFoundError: message, meta, body
         effect = NotFoundError(msg, meta, body)
         client.indices.get.side_effect = effect
         self.assertEqual('hasthemath', get_datemath(client, datemath, psuedo_random))
+
     def test_failure(self):
         """test_failure
 
@@ -643,7 +755,7 @@ class TestGetDateMath(TestCase):
         datemath = '{hasthemath}'
         # 5 positional args for meta: status, http_version, headers, duration, node
         meta = ApiResponseMeta(404, '1.1', {}, 0.01, None)
-        body = {'error':{'index':'this_will_not_be_found'}}
+        body = {'error': {'index': 'this_will_not_be_found'}}
         msg = 'index_not_found_exception'
         # 3 positional args for NotFoundError: message, meta, body
         effect = NotFoundError(msg, meta, body)
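
The reformatted fixtures above all follow the same shape: an expected (start, end) pair of epoch seconds bracketing a whole calendar unit relative to the reference EPOCH. A small stdlib-only sketch of how one such pair (the single-hour case) is derived; treating naive datetimes as UTC is an assumption about datetime_to_epoch, which is not shown in this diff.

    from datetime import datetime, timezone

    def to_epoch(dt):
        # Assumes naive datetimes are UTC, matching how the fixtures read.
        return int(dt.replace(tzinfo=timezone.utc).timestamp())

    reference = to_epoch(datetime(2017, 4, 3, 22, 50, 17))  # TestDateRange.EPOCH
    start = to_epoch(datetime(2017, 4, 3, 21, 0, 0))        # previous whole hour
    end = to_epoch(datetime(2017, 4, 3, 21, 59, 59))        # inclusive upper bound

    assert end - start == 3599      # one hour, minus the final second
    assert start < end < reference  # the window precedes the reference point
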
diff -pruN 8.0.21-1/tests/unit/test_helpers_getters.py 9.0.0-1/tests/unit/test_helpers_getters.py
--- 8.0.21-1/tests/unit/test_helpers_getters.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/unit/test_helpers_getters.py	2025-10-03 20:58:42.000000000 +0000
@@ -4,7 +4,7 @@ from unittest import TestCase
 from unittest.mock import Mock
 import pytest
 from elastic_transport import ApiResponseMeta
-from elasticsearch8 import NotFoundError, TransportError
+from elasticsearch9 import NotFoundError, TransportError
 from curator.exceptions import CuratorException, FailedExecution, MissingArgument
 from curator.helpers import getters
 
diff -pruN 8.0.21-1/tests/unit/test_helpers_testers.py 9.0.0-1/tests/unit/test_helpers_testers.py
--- 8.0.21-1/tests/unit/test_helpers_testers.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/unit/test_helpers_testers.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,54 +1,74 @@
 """Unit tests for utils"""
+
 from unittest import TestCase
-import pytest
 from unittest.mock import Mock
+import pytest
 from elastic_transport import ApiResponseMeta
-from elasticsearch8 import Elasticsearch
-from elasticsearch8.exceptions import AuthenticationException, NotFoundError
+from elasticsearch9 import Elasticsearch
+from elasticsearch9.exceptions import AuthenticationException, NotFoundError
 from curator.exceptions import (
-     ConfigurationError, FailedExecution, MissingArgument, RepositoryException,
-     SearchableSnapshotException)
+    ConfigurationError,
+    FailedExecution,
+    MissingArgument,
+    RepositoryException,
+    SearchableSnapshotException,
+)
 from curator.helpers.testers import (
-    has_lifecycle_name, is_idx_partial, repository_exists, rollable_alias, snapshot_running,
-    validate_filters, verify_client_object, verify_repository)
+    has_lifecycle_name,
+    is_idx_partial,
+    repository_exists,
+    rollable_alias,
+    snapshot_running,
+    validate_filters,
+    verify_client_object,
+    verify_repository,
+)
 
 FAKE_FAIL = Exception('Simulated Failure')
 
+
 class TestRepositoryExists(TestCase):
     """TestRepositoryExists
 
     Test helpers.testers.repository_exists functionality.
     """
+
     def test_missing_arg(self):
         """test_missing_arg
 
         Should raise an exception if the repository isn't passed as an arg
         """
         client = Mock()
-        with pytest.raises(MissingArgument, match=r'No value for "repository" provided'):
+        with pytest.raises(
+            MissingArgument, match=r'No value for "repository" provided'
+        ):
             repository_exists(client)
+
     def test_repository_in_results(self):
         """test_repository_in_results
 
         Should return ``True`` if the passed repository exists
         """
         client = Mock()
-        client.snapshot.get_repository.return_value = {'repo':{'foo':'bar'}}
+        client.snapshot.get_repository.return_value = {'repo': {'foo': 'bar'}}
         assert repository_exists(client, repository="repo")
+
     def test_repo_not_in_results(self):
         """test_repo_not_in_results
 
         Should return ``False`` if the passed repository does not exist
         """
         client = Mock()
-        client.snapshot.get_repository.return_value = {'not_your_repo':{'foo':'bar'}}
+        client.snapshot.get_repository.return_value = {'not_your_repo': {'foo': 'bar'}}
         assert not repository_exists(client, repository="repo")
 
+
 class TestRollableAlias(TestCase):
     """TestRollableAlias
 
     Test helpers.testers.rollable_alias functionality.
     """
+
     def test_return_false_if_no_alias(self):
         """test_return_false_if_no_alias
 
@@ -56,20 +76,25 @@ class TestRollableAlias(TestCase):
         """
         err = 'simulated error'
         client = Mock()
-        client.info.return_value = {'version': {'number': '8.3.3'} }
+        client.info.return_value = {'version': {'number': '8.3.3'}}
         client.indices.get_alias.return_value = {}
         client.indices.get_alias.side_effect = NotFoundError(404, err, err)
         assert not rollable_alias(client, 'foo')
+
     def test_return_false_too_many_indices(self):
         """test_return_false_too_many_indices
 
         Should return ``False`` if alias associated with too many indices
         """
-        retval = {'index-a': {'aliases': {'foo': {}}}, 'index-b': {'aliases': {'foo': {}}}}
+        retval = {
+            'index-a': {'aliases': {'foo': {}}},
+            'index-b': {'aliases': {'foo': {}}},
+        }
         client = Mock()
-        client.info.return_value = {'version': {'number': '8.3.3'} }
+        client.info.return_value = {'version': {'number': '8.3.3'}}
         client.indices.get_alias.return_value = retval
         assert not rollable_alias(client, 'foo')
+
     def test_return_false_non_numeric(self):
         """test_return_false_non_numeric
 
@@ -77,9 +102,10 @@ class TestRollableAlias(TestCase):
         """
         retval = {'index-a': {'aliases': {'foo': {}}}}
         client = Mock()
-        client.info.return_value = {'version': {'number': '8.3.3'} }
+        client.info.return_value = {'version': {'number': '8.3.3'}}
         client.indices.get_alias.return_value = retval
         assert not rollable_alias(client, 'foo')
+
     def test_return_true_two_digits(self):
         """test_return_true_two_digits
 
@@ -87,9 +113,10 @@ class TestRollableAlias(TestCase):
         """
         retval = {'index-00001': {'aliases': {'foo': {}}}}
         client = Mock()
-        client.info.return_value = {'version': {'number': '8.3.3'} }
+        client.info.return_value = {'version': {'number': '8.3.3'}}
         client.indices.get_alias.return_value = retval
         assert rollable_alias(client, 'foo')
+
     def test_return_true_hyphenated(self):
         """test_return_true_hyphenated
 
@@ -97,15 +124,17 @@ class TestRollableAlias(TestCase):
         """
         retval = {'index-2017.03.07-1': {'aliases': {'foo': {}}}}
         client = Mock()
-        client.info.return_value = {'version': {'number': '8.3.3'} }
+        client.info.return_value = {'version': {'number': '8.3.3'}}
         client.indices.get_alias.return_value = retval
         assert rollable_alias(client, 'foo')
 
+
 class TestSnapshotRunning(TestCase):
     """TestSnapshotRunning
 
     Test helpers.testers.snapshot_running functionality
     """
+
     # :pylint disable=line-too-long
     def test_true(self):
         """test_true
@@ -116,6 +145,7 @@ class TestSnapshotRunning(TestCase):
         client.snapshot.status.return_value = {'snapshots': ['running']}
         # self.assertTrue(snapshot_running(client))
         assert snapshot_running(client)
+
     def test_false(self):
         """test_False
 
@@ -125,6 +155,7 @@ class TestSnapshotRunning(TestCase):
         client.snapshot.status.return_value = {'snapshots': []}
         # self.assertFalse(snapshot_running(client))
         assert not snapshot_running(client)
+
     def test_raises_exception(self):
         """test_raises_exception
 
@@ -137,33 +168,44 @@ class TestSnapshotRunning(TestCase):
         with pytest.raises(FailedExecution, match=r'Rerun with loglevel DEBUG'):
             snapshot_running(client)
 
+
 class TestValidateFilters(TestCase):
     """TestValidateFilters
 
     Test helpers.testers.validate_filters functionality.
     """
+
     def test_snapshot_with_index_filter(self):
         """test_snapshot_with_index_filter
 
         Should raise ConfigurationError with improper filter for filtertype
         In this case, an index filtertype (``kibana``) for the ``delete_snapshots`` filter
         """
-        with pytest.raises(ConfigurationError, match=r'filtertype is not compatible with action'):
+        with pytest.raises(
+            ConfigurationError, match=r'filtertype is not compatible with action'
+        ):
             validate_filters('delete_snapshots', [{'filtertype': 'kibana'}])
+
     def test_index_with_snapshot_filter(self):
         """test_index_with_snapshot_filter
 
         Should raise ConfigurationError with improper filter for filtertype
         In this case, a snapshot filtertype (``state``) for the ``delete_indices`` filter
         """
-        with pytest.raises(ConfigurationError, match=r'filtertype is not compatible with action'):
-            validate_filters('delete_indices', [{'filtertype': 'state', 'state': 'SUCCESS'}])
+        with pytest.raises(
+            ConfigurationError, match=r'filtertype is not compatible with action'
+        ):
+            validate_filters(
+                'delete_indices', [{'filtertype': 'state', 'state': 'SUCCESS'}]
+            )
+
 
 class TestVerifyClientObject(TestCase):
     """TestVerifyClientObject
 
     Test helpers.testers.verify_client_object functionality.
     """
+
     def test_is_client_object(self):
         """test_is_client_object
 
@@ -181,13 +223,18 @@ class TestVerifyClientObject(TestCase):
         with pytest.raises(TypeError, match=r'Not a valid client object'):
             verify_client_object(test)
 
+
 class TestVerifyRepository(TestCase):
     """TestVerifyRepository
 
     Test helpers.testers.verify_repository functionality
     """
-    VERIFIED_NODES = {'nodes': {'nodeid1': {'name': 'node1'}, 'nodeid2': {'name': 'node2'}}}
+
+    VERIFIED_NODES = {
+        'nodes': {'nodeid1': {'name': 'node1'}, 'nodeid2': {'name': 'node2'}}
+    }
     REPO_NAME = 'repo_name'
+
     def test_passing(self):
         """test_passing
 
@@ -196,6 +243,7 @@ class TestVerifyRepository(TestCase):
         client = Mock()
         client.snapshot.verify_repository.return_value = self.VERIFIED_NODES
         assert None is verify_repository(client, repository=self.REPO_NAME)
+
     def test_raises_404(self):
         """test_raises_404
 
@@ -210,8 +258,11 @@ class TestVerifyRepository(TestCase):
         # 3 positional args for NotFoundError: message, meta, body
         effect = NotFoundError(msg, meta, body)
         client.snapshot.verify_repository.side_effect = effect
-        with pytest.raises(RepositoryException, match=r'Repository "repo_name" not found'):
+        with pytest.raises(
+            RepositoryException, match=r'Repository "repo_name" not found'
+        ):
             verify_repository(client, repository=self.REPO_NAME)
+
     def test_raises_401(self):
         """test_raises_401
 
@@ -226,8 +277,11 @@ class TestVerifyRepository(TestCase):
         # 3 positional args for NotFoundError: message, meta, body
         effect = AuthenticationException(msg, meta, body)
         client.snapshot.verify_repository.side_effect = effect
-        with pytest.raises(RepositoryException, match=r'Got a 401 response from Elasticsearch'):
+        with pytest.raises(
+            RepositoryException, match=r'Got a 401 response from Elasticsearch'
+        ):
             verify_repository(client, repository=self.REPO_NAME)
+
     def test_raises_other(self):
         """test_raises_other
 
@@ -239,44 +293,57 @@ class TestVerifyRepository(TestCase):
         with pytest.raises(RepositoryException, match=r'Failed to verify'):
             verify_repository(client, repository=self.REPO_NAME)
 
+
 class TestHasLifecycleName(TestCase):
     """TestHasLifecycleName
 
     Test helpers.testers.has_lifecycle_name functionality
     """
+
     def test_has_lifecycle_name(self):
         """test_has_lifecycle_name"""
         testval = {'lifecycle': {'name': 'ilm_policy'}}
         assert has_lifecycle_name(testval)
+
     def test_has_no_lifecycle_name(self):
         """test_has_no_lifecycle_name"""
         testval = {'lifecycle': {'nothere': 'nope'}}
         assert not has_lifecycle_name(testval)
 
+
 class TestIsIdxPartial(TestCase):
     """TestIsIdxPartial
 
     Test helpers.testers.is_idx_partial functionality
     """
+
     def test_is_idx_partial(self):
         """test_is_idx_partial"""
         testval = {'store': {'snapshot': {'partial': True}}}
         assert is_idx_partial(testval)
+
     def test_is_idx_partial_false1(self):
         """test_is_idx_partial_false1"""
         testval = {'store': {'snapshot': {'partial': False}}}
         assert not is_idx_partial(testval)
+
     def test_is_idx_partial_false2(self):
         """test_is_idx_partial_false2"""
         testval = {'store': {'snapshot': {'nothere': 'nope'}}}
         assert not is_idx_partial(testval)
+
     def test_is_idx_partial_raises1(self):
         """test_is_idx_partial_raises1"""
         testval = {'store': {'nothere': 'nope'}}
-        with pytest.raises(SearchableSnapshotException, match='not a mounted searchable snapshot'):
+        with pytest.raises(
+            SearchableSnapshotException, match='not a mounted searchable snapshot'
+        ):
             is_idx_partial(testval)
+
     def test_is_idx_partial_raises2(self):
         """test_is_idx_partial_raises2"""
         testval = {'nothere': 'nope'}
-        with pytest.raises(SearchableSnapshotException, match='not a mounted searchable snapshot'):
+        with pytest.raises(
+            SearchableSnapshotException, match='not a mounted searchable snapshot'
+        ):
             is_idx_partial(testval)
diff -pruN 8.0.21-1/tests/unit/test_validators.py 9.0.0-1/tests/unit/test_validators.py
--- 8.0.21-1/tests/unit/test_validators.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/unit/test_validators.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,8 +1,9 @@
 """Test filter and schema validators"""
+
 from unittest import TestCase
 from voluptuous import Schema
 from es_client.exceptions import FailedValidation
-from es_client.helpers.schemacheck import SchemaCheck
+from es_client.schemacheck import SchemaCheck
 from curator.exceptions import ConfigurationError
 from curator.validators.filter_functions import validfilters, singlefilter
 
@@ -10,226 +11,226 @@ from curator.validators.filter_functions
 def shared_result(config, action):
     """Test functions should be able to reuse this"""
     return SchemaCheck(
-        config,
-        Schema(validfilters(action)),
-        'filters',
-        'testing'
+        config, Schema(validfilters(action)), 'filters', 'testing'
     ).result()
 
+
 class TestFilters(TestCase):
     def test_single_raises_configuration_error(self):
         data = {'max_num_segments': 1, 'exclude': True}
-        self.assertRaises(
-            ConfigurationError,
-            singlefilter,
-            'forcemerge',
-            data
-        )
+        self.assertRaises(ConfigurationError, singlefilter, 'forcemerge', data)
+
 
 class TestFilterTypes(TestCase):
     def test_alias(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'alias',
-                'aliases' : ['alias1', 'alias2'],
-                'exclude' : False,
+                'filtertype': 'alias',
+                'aliases': ['alias1', 'alias2'],
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_age(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'age',
-                'direction' : 'older',
-                'unit' : 'days',
-                'unit_count' : 1,
-                'source' : 'field_stats',
-                'field'  : '@timestamp',
+                'filtertype': 'age',
+                'direction': 'older',
+                'unit': 'days',
+                'unit_count': 1,
+                'source': 'field_stats',
+                'field': '@timestamp',
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_age_with_string_unit_count(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'age',
-                'direction' : 'older',
-                'unit' : 'days',
-                'unit_count' : "1",
-                'source' : 'field_stats',
-                'field'  : '@timestamp',
+                'filtertype': 'age',
+                'direction': 'older',
+                'unit': 'days',
+                'unit_count': "1",
+                'source': 'field_stats',
+                'field': '@timestamp',
             }
         ]
         result = shared_result(config, action)
         self.assertEqual(1, result[0]['unit_count'])
+
     def test_allocated(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'allocated',
-                'key' : 'foo',
-                'value' : 'bar',
-                'allocation_type' : 'require',
-                'exclude' : False,
+                'filtertype': 'allocated',
+                'key': 'foo',
+                'value': 'bar',
+                'allocation_type': 'require',
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_closed(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'closed',
-                'exclude' : False,
+                'filtertype': 'closed',
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_count(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'count',
-                'count' : 1,
-                'reverse' : True,
-                'exclude' : False,
+                'filtertype': 'count',
+                'count': 1,
+                'reverse': True,
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_forcemerged(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'forcemerged',
-                'max_num_segments' : 1,
-                'exclude' : False,
+                'filtertype': 'forcemerged',
+                'max_num_segments': 1,
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_kibana(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'kibana',
-                'exclude' : False,
+                'filtertype': 'kibana',
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_opened(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'opened',
-                'exclude' : False,
+                'filtertype': 'opened',
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_space_name_age(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'space',
-                'disk_space' : 1,
-                'use_age' : True,
-                'exclude' : False,
-                'source' : 'name',
-                'timestring' : '%Y.%m.%d',
+                'filtertype': 'space',
+                'disk_space': 1,
+                'use_age': True,
+                'exclude': False,
+                'source': 'name',
+                'timestring': '%Y.%m.%d',
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_space_name_age_string_float(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'space',
-                'disk_space' : "1.0",
-                'use_age' : True,
-                'exclude' : False,
-                'source' : 'name',
-                'timestring' : '%Y.%m.%d',
+                'filtertype': 'space',
+                'disk_space': "1.0",
+                'use_age': True,
+                'exclude': False,
+                'source': 'name',
+                'timestring': '%Y.%m.%d',
             }
         ]
         result = shared_result(config, action)
         self.assertEqual(1.0, result[0]['disk_space'])
+
     def test_space_name_age_no_ts(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'space',
-                'disk_space' : 1,
-                'use_age' : True,
-                'exclude' : False,
-                'source' : 'name',
+                'filtertype': 'space',
+                'disk_space': 1,
+                'use_age': True,
+                'exclude': False,
+                'source': 'name',
             }
         ]
-        schema = SchemaCheck(
-            config,
-            Schema(validfilters(action)),
-            'filters',
-            'testing'
-        )
+        schema = SchemaCheck(config, Schema(validfilters(action)), 'filters', 'testing')
         self.assertRaises(FailedValidation, schema.result)
+
     def test_space_field_stats_age(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'space',
-                'disk_space' : 1,
-                'use_age' : True,
-                'exclude' : False,
-                'source' : 'field_stats',
-                'field' : '@timestamp',
+                'filtertype': 'space',
+                'disk_space': 1,
+                'use_age': True,
+                'exclude': False,
+                'source': 'field_stats',
+                'field': '@timestamp',
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_space_field_stats_age_no_field(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'space',
-                'disk_space' : 1,
-                'use_age' : True,
-                'exclude' : False,
-                'source' : 'field_stats',
+                'filtertype': 'space',
+                'disk_space': 1,
+                'use_age': True,
+                'exclude': False,
+                'source': 'field_stats',
             }
         ]
-        schema = SchemaCheck(
-            config,
-            Schema(validfilters(action)),
-            'filters',
-            'testing'
-        )
+        schema = SchemaCheck(config, Schema(validfilters(action)), 'filters', 'testing')
         self.assertRaises(FailedValidation, schema.result)
+
     def test_space_creation_date_age(self):
         action = 'delete_indices'
         config = [
             {
-                'filtertype' : 'space',
-                'disk_space' : 1,
-                'use_age' : True,
-                'exclude' : False,
-                'source' : 'creation_date',
+                'filtertype': 'space',
+                'disk_space': 1,
+                'use_age': True,
+                'exclude': False,
+                'source': 'creation_date',
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_state(self):
         action = 'delete_snapshots'
         config = [
             {
-                'filtertype' : 'state',
-                'state' : 'SUCCESS',
-                'exclude' : False,
+                'filtertype': 'state',
+                'state': 'SUCCESS',
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
+
     def test_shards(self):
         action = 'shrink'
         config = [
             {
-                'filtertype' : 'shards',
-                'number_of_shards' : 5,
+                'filtertype': 'shards',
+                'number_of_shards': 5,
                 'shard_filter_behavior': 'greater_than',
-                'exclude' : False,
+                'exclude': False,
             }
         ]
         self.assertEqual(config, shared_result(config, action))
diff -pruN 8.0.21-1/tests/unit/testvars.py 9.0.0-1/tests/unit/testvars.py
--- 8.0.21-1/tests/unit/testvars.py	2025-04-02 05:40:02.000000000 +0000
+++ 9.0.0-1/tests/unit/testvars.py	2025-10-03 20:58:42.000000000 +0000
@@ -1,268 +1,405 @@
-from elasticsearch8 import ConflictError, NotFoundError, TransportError
+from elasticsearch9 import ConflictError, NotFoundError, TransportError
 
-fake_fail      = Exception('Simulated Failure')
-four_oh_one    = TransportError(401, "simulated error")
-four_oh_four   = TransportError(404, "simulated error")
+fake_fail = Exception('Simulated Failure')
+four_oh_one = TransportError(401, "simulated error")
+four_oh_four = TransportError(404, "simulated error")
 get_alias_fail = NotFoundError(404, 'simulated error', 'simulated error')
-named_index    = 'index_name'
-named_indices  = [ "index-2015.01.01", "index-2015.02.01" ]
-open_index     = {'metadata': {'indices' : { named_index : {'state' : 'open'}}}}
-closed_index   = {'metadata': {'indices' : { named_index : {'state' : 'close'}}}}
+named_index = 'index_name'
+named_indices = ["index-2015.01.01", "index-2015.02.01"]
+open_index = {'metadata': {'indices': {named_index: {'state': 'open'}}}}
+closed_index = {'metadata': {'indices': {named_index: {'state': 'close'}}}}
 cat_open_index = [{'status': 'open'}]
 cat_closed_index = [{'status': 'close'}]
-open_indices   = { 'metadata': { 'indices' : { 'index1' : { 'state' : 'open' },
-                                               'index2' : { 'state' : 'open' }}}}
-closed_indices = { 'metadata': { 'indices' : { 'index1' : { 'state' : 'close' },
-                                               'index2' : { 'state' : 'close' }}}}
-named_alias    = 'alias_name'
-alias_retval   = { "pre_aliased_index": { "aliases" : { named_alias : { }}}}
-rollable_alias = { "index-000001": { "aliases" : { named_alias : { }}}}
-rollover_conditions = { 'conditions': { 'max_age': '1s' } }
+open_indices = {
+    'metadata': {'indices': {'index1': {'state': 'open'}, 'index2': {'state': 'open'}}}
+}
+closed_indices = {
+    'metadata': {
+        'indices': {'index1': {'state': 'close'}, 'index2': {'state': 'close'}}
+    }
+}
+named_alias = 'alias_name'
+alias_retval = {"pre_aliased_index": {"aliases": {named_alias: {}}}}
+rollable_alias = {"index-000001": {"aliases": {named_alias: {}}}}
+rollover_conditions = {'conditions': {'max_age': '1s'}}
 dry_run_rollover = {
-  "acknowledged": True,
-  "shards_acknowledged": True,
-  "old_index": "index-000001",
-  "new_index": "index-000002",
-  "rolled_over": False,
-  "dry_run": True,
-  "conditions": {
-    "max_age" : "1s"
-  }
+    "acknowledged": True,
+    "shards_acknowledged": True,
+    "old_index": "index-000001",
+    "new_index": "index-000002",
+    "rolled_over": False,
+    "dry_run": True,
+    "conditions": {"max_age": "1s"},
 }
 aliases_retval = {
-    "index1": { "aliases" : { named_alias : { } } },
-    "index2": { "aliases" : { named_alias : { } } },
+    "index1": {"aliases": {named_alias: {}}},
+    "index2": {"aliases": {named_alias: {}}},
+}
+alias_one_add = [{'add': {'alias': 'alias', 'index': 'index_name'}}]
+alias_one_add_with_extras = [
+    {
+        'add': {
+            'alias': 'alias',
+            'index': 'index_name',
+            'filter': {'term': {'user': 'kimchy'}},
+        }
     }
-alias_one_add  = [{'add': {'alias': 'alias', 'index': 'index_name'}}]
-alias_one_add_with_extras  = [
-    { 'add': {
-            'alias': 'alias', 'index': 'index_name',
-            'filter' : { 'term' : { 'user' : 'kimchy' }}
-            }
-    }]
-alias_one_rm   = [{'remove': {'alias': 'my_alias', 'index': named_index}}]
-alias_one_body = { "actions" : [
-                        {'remove': {'alias': 'alias', 'index': 'index_name'}},
-                        {'add': {'alias': 'alias', 'index': 'index_name'}}
-                 ]}
-alias_two_add  = [
-                    {'add': {'alias': 'alias', 'index': 'index-2016.03.03'}},
-                    {'add': {'alias': 'alias', 'index': 'index-2016.03.04'}},
-                 ]
-alias_two_rm   = [
-                    {'remove': {'alias': 'my_alias', 'index': 'index-2016.03.03'}},
-                    {'remove': {'alias': 'my_alias', 'index': 'index-2016.03.04'}},
-                 ]
-alias_success  = { "acknowledged": True }
-allocation_in  = {named_index: {'settings': {'index': {'routing': {'allocation': {'require': {'foo': 'bar'}}}}}}}
-allocation_out = {named_index: {'settings': {'index': {'routing': {'allocation': {'require': {'not': 'foo'}}}}}}}
-indices_space  = { 'indices' : {
-        'index1' : { 'index' : { 'primary_size_in_bytes': 1083741824 }},
-        'index2' : { 'index' : { 'primary_size_in_bytes': 1083741824 }}}}
-snap_name      = 'snap_name'
-repo_name      = 'repo_name'
-test_repo      = {repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/repo_name'}}}
-test_repos     = {'TESTING': {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/TESTING'}},
-                  repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/rmp/repos/repo_name'}}}
-snap_running   = { 'snapshots': ['running'] }
-nosnap_running = { 'snapshots': [] }
-snapshot       = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'SUCCESS',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    }]}
-oneinprogress  = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-03-01T00:00:02.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'IN_PROGRESS',
-                        'snapshot': snap_name, 'end_time': '2015-03-01T00:00:03.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1425168002
-                    }]}
-partial        = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'PARTIAL',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    }]}
-failed         = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'FAILED',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    }]}
-othersnap      = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'SOMETHINGELSE',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    }]}
-snapshots         = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'SUCCESS',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    },
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-03-01T00:00:02.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'SUCCESS',
-                        'snapshot': 'snapshot-2015.03.01', 'end_time': '2015-03-01T00:00:03.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1425168002
-                    }]}
-inprogress        = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'SUCCESS',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    },
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-03-01T00:00:02.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'IN_PROGRESS',
-                        'snapshot': 'snapshot-2015.03.01', 'end_time': '2015-03-01T00:00:03.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1425168002
-                    }]}
-highly_unlikely   = { 'snapshots': [
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-02-01T00:00:00.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'IN_PROGRESS',
-                        'snapshot': snap_name, 'end_time': '2015-02-01T00:00:01.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1422748800
-                    },
-                    {
-                        'duration_in_millis': 60000, 'start_time': '2015-03-01T00:00:02.000Z',
-                        'shards': {'successful': 4, 'failed': 0, 'total': 4},
-                        'end_time_in_millis': 0, 'state': 'IN_PROGRESS',
-                        'snapshot': 'snapshot-2015.03.01', 'end_time': '2015-03-01T00:00:03.000Z',
-                        'indices': named_indices,
-                        'failures': [], 'start_time_in_millis': 1425168002
-                    }]}
-snap_body_all   = {
-                    "ignore_unavailable": False,
-                    "include_global_state": True,
-                    "partial": False,
-                    "indices" : "_all"
-                  }
-snap_body       = {
-                    "ignore_unavailable": False,
-                    "include_global_state": True,
-                    "partial": False,
-                    "indices" : "index-2015.01.01,index-2015.02.01"
-                  }
-verified_nodes  = {'nodes': {'nodeid1': {'name': 'node1'}, 'nodeid2': {'name': 'node2'}}}
-synced_pass     = {
-                    "_shards":{"total":1,"successful":1,"failed":0},
-                    "index_name":{
-                        "total":1,"successful":1,"failed":0,
-                        "failures":[],
-                    }
-                  }
-synced_fail     = {
-                    "_shards":{"total":1,"successful":0,"failed":1},
-                    "index_name":{
-                        "total":1,"successful":0,"failed":1,
-                        "failures":[
-                            {"shard":0,"reason":"pending operations","routing":{"state":"STARTED","primary":True,"node":"nodeid1","relocating_node":None,"shard":0,"index":"index_name"}},
-                        ]
-                    }
-                  }
-sync_conflict   = ConflictError(409, '{"_shards":{"total":1,"successful":0,"failed":1},"index_name":{"total":1,"successful":0,"failed":1,"failures":[{"shard":0,"reason":"pending operations","routing":{"state":"STARTED","primary":true,"node":"nodeid1","relocating_node":null,"shard":0,"index":"index_name"}}]}})', synced_fail)
-synced_fails    = {
-                    "_shards":{"total":2,"successful":1,"failed":1},
-                    "index1":{
-                        "total":1,"successful":0,"failed":1,
-                        "failures":[
-                            {"shard":0,"reason":"pending operations","routing":{"state":"STARTED","primary":True,"node":"nodeid1","relocating_node":None,"shard":0,"index":"index_name"}},
-                        ]
-                    },
-                    "index2":{
-                        "total":1,"successful":1,"failed":0,
-                        "failures":[]
-                    },
-                  }
+]
+alias_one_rm = [{'remove': {'alias': 'my_alias', 'index': named_index}}]
+alias_one_body = {
+    "actions": [
+        {'remove': {'alias': 'alias', 'index': 'index_name'}},
+        {'add': {'alias': 'alias', 'index': 'index_name'}},
+    ]
+}
+alias_two_add = [
+    {'add': {'alias': 'alias', 'index': 'index-2016.03.03'}},
+    {'add': {'alias': 'alias', 'index': 'index-2016.03.04'}},
+]
+alias_two_rm = [
+    {'remove': {'alias': 'my_alias', 'index': 'index-2016.03.03'}},
+    {'remove': {'alias': 'my_alias', 'index': 'index-2016.03.04'}},
+]
+alias_success = {"acknowledged": True}
+allocation_in = {
+    named_index: {
+        'settings': {'index': {'routing': {'allocation': {'require': {'foo': 'bar'}}}}}
+    }
+}
+allocation_out = {
+    named_index: {
+        'settings': {'index': {'routing': {'allocation': {'require': {'not': 'foo'}}}}}
+    }
+}
+indices_space = {
+    'indices': {
+        'index1': {'index': {'primary_size_in_bytes': 1083741824}},
+        'index2': {'index': {'primary_size_in_bytes': 1083741824}},
+    }
+}
+snap_name = 'snap_name'
+repo_name = 'repo_name'
+test_repo = {
+    repo_name: {
+        'type': 'fs',
+        'settings': {'compress': 'true', 'location': '/tmp/repos/repo_name'},
+    }
+}
+test_repos = {
+    'TESTING': {
+        'type': 'fs',
+        'settings': {'compress': 'true', 'location': '/tmp/repos/TESTING'},
+    },
+    repo_name: {
+        'type': 'fs',
+        'settings': {'compress': 'true', 'location': '/rmp/repos/repo_name'},
+    },
+}
+snap_running = {'snapshots': ['running']}
+nosnap_running = {'snapshots': []}
+snapshot = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'SUCCESS',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        }
+    ]
+}
+oneinprogress = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-03-01T00:00:02.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'IN_PROGRESS',
+            'snapshot': snap_name,
+            'end_time': '2015-03-01T00:00:03.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1425168002,
+        }
+    ]
+}
+partial = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'PARTIAL',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        }
+    ]
+}
+failed = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'FAILED',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        }
+    ]
+}
+othersnap = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'SOMETHINGELSE',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        }
+    ]
+}
+snapshots = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'SUCCESS',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        },
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-03-01T00:00:02.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'SUCCESS',
+            'snapshot': 'snapshot-2015.03.01',
+            'end_time': '2015-03-01T00:00:03.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1425168002,
+        },
+    ]
+}
+inprogress = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'SUCCESS',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        },
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-03-01T00:00:02.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'IN_PROGRESS',
+            'snapshot': 'snapshot-2015.03.01',
+            'end_time': '2015-03-01T00:00:03.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1425168002,
+        },
+    ]
+}
+highly_unlikely = {
+    'snapshots': [
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-02-01T00:00:00.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'IN_PROGRESS',
+            'snapshot': snap_name,
+            'end_time': '2015-02-01T00:00:01.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1422748800,
+        },
+        {
+            'duration_in_millis': 60000,
+            'start_time': '2015-03-01T00:00:02.000Z',
+            'shards': {'successful': 4, 'failed': 0, 'total': 4},
+            'end_time_in_millis': 0,
+            'state': 'IN_PROGRESS',
+            'snapshot': 'snapshot-2015.03.01',
+            'end_time': '2015-03-01T00:00:03.000Z',
+            'indices': named_indices,
+            'failures': [],
+            'start_time_in_millis': 1425168002,
+        },
+    ]
+}
+snap_body_all = {
+    "ignore_unavailable": False,
+    "include_global_state": True,
+    "partial": False,
+    "indices": "_all",
+}
+snap_body = {
+    "ignore_unavailable": False,
+    "include_global_state": True,
+    "partial": False,
+    "indices": "index-2015.01.01,index-2015.02.01",
+}
+verified_nodes = {'nodes': {'nodeid1': {'name': 'node1'}, 'nodeid2': {'name': 'node2'}}}
+synced_pass = {
+    "_shards": {"total": 1, "successful": 1, "failed": 0},
+    "index_name": {
+        "total": 1,
+        "successful": 1,
+        "failed": 0,
+        "failures": [],
+    },
+}
+synced_fail = {
+    "_shards": {"total": 1, "successful": 0, "failed": 1},
+    "index_name": {
+        "total": 1,
+        "successful": 0,
+        "failed": 1,
+        "failures": [
+            {
+                "shard": 0,
+                "reason": "pending operations",
+                "routing": {
+                    "state": "STARTED",
+                    "primary": True,
+                    "node": "nodeid1",
+                    "relocating_node": None,
+                    "shard": 0,
+                    "index": "index_name",
+                },
+            },
+        ],
+    },
+}
+sync_conflict = ConflictError(
+    409,
+    '{"_shards":{"total":1,"successful":0,"failed":1},"index_name":{"total":1,"successful":0,"failed":1,"failures":[{"shard":0,"reason":"pending operations","routing":{"state":"STARTED","primary":true,"node":"nodeid1","relocating_node":null,"shard":0,"index":"index_name"}}]}})',
+    synced_fail,
+)
+synced_fails = {
+    "_shards": {"total": 2, "successful": 1, "failed": 1},
+    "index1": {
+        "total": 1,
+        "successful": 0,
+        "failed": 1,
+        "failures": [
+            {
+                "shard": 0,
+                "reason": "pending operations",
+                "routing": {
+                    "state": "STARTED",
+                    "primary": True,
+                    "node": "nodeid1",
+                    "relocating_node": None,
+                    "shard": 0,
+                    "index": "index_name",
+                },
+            },
+        ],
+    },
+    "index2": {"total": 1, "successful": 1, "failed": 0, "failures": []},
+}
 
-state_one  = [{'index': named_index, 'status': 'open'}]
+state_one = [{'index': named_index, 'status': 'open'}]
 
-settings_one   = {
+settings_one = {
     named_index: {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'random_uuid_string_here',
-                'number_of_shards': '2', 'creation_date': '1456963200172',
+                'number_of_replicas': '1',
+                'uuid': 'random_uuid_string_here',
+                'number_of_shards': '2',
+                'creation_date': '1456963200172',
                 'routing': {'allocation': {'include': {'tag': 'foo'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     }
 }
 
-settings_1_get_aliases = { named_index: { "aliases" : { 'my_alias' : { } } } }
+settings_1_get_aliases = {named_index: {"aliases": {'my_alias': {}}}}
 
 state_two = [
     {'index': 'index-2016.03.03', 'status': 'open'},
-    {'index': 'index-2016.03.04', 'status': 'open'}
+    {'index': 'index-2016.03.04', 'status': 'open'},
 ]
 
-settings_two  = {
+settings_two = {
     'index-2016.03.03': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'random_uuid_string_here',
-                'number_of_shards': '5', 'creation_date': '1456963200172',
+                'number_of_replicas': '1',
+                'uuid': 'random_uuid_string_here',
+                'number_of_shards': '5',
+                'creation_date': '1456963200172',
                 'routing': {'allocation': {'include': {'tag': 'foo'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     },
     'index-2016.03.04': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'another_random_uuid_string',
-                'number_of_shards': '5', 'creation_date': '1457049600812',
+                'number_of_replicas': '1',
+                'uuid': 'another_random_uuid_string',
+                'number_of_shards': '5',
+                'creation_date': '1457049600812',
                 'routing': {'allocation': {'include': {'tag': 'bar'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
-    }
+        },
+    },
 }
 
 settings_2_get_aliases = {
-    "index-2016.03.03": { "aliases" : { 'my_alias' : { } } },
-    "index-2016.03.04": { "aliases" : { 'my_alias' : { } } },
+    "index-2016.03.03": {"aliases": {'my_alias': {}}},
+    "index-2016.03.04": {"aliases": {'my_alias': {}}},
 }
 
-state_2_closed  = [
+state_2_closed = [
     {'index': 'index-2016.03.03', 'status': 'close'},
     {'index': 'index-2016.03.04', 'status': 'open'},
 ]
@@ -273,86 +410,104 @@ settings_2_closed = {
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'random_uuid_string_here',
-                'number_of_shards': '5', 'creation_date': '1456963200172',
+                'number_of_replicas': '1',
+                'uuid': 'random_uuid_string_here',
+                'number_of_shards': '5',
+                'creation_date': '1456963200172',
                 'routing': {'allocation': {'include': {'tag': 'foo'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     },
     'index-2016.03.04': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'another_random_uuid_string',
-                'number_of_shards': '5', 'creation_date': '1457049600812',
+                'number_of_replicas': '1',
+                'uuid': 'another_random_uuid_string',
+                'number_of_shards': '5',
+                'creation_date': '1457049600812',
                 'routing': {'allocation': {'include': {'tag': 'bar'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
-    }
+        },
+    },
 }
 
-state_four  = [
+state_four = [
     {'index': 'a-2016.03.03', 'status': 'open'},
     {'index': 'b-2016.03.04', 'status': 'open'},
     {'index': 'c-2016.03.05', 'status': 'close'},
     {'index': 'd-2016.03.06', 'status': 'open'},
 ]
 
-settings_four  = {
+settings_four = {
     'a-2016.03.03': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'random_uuid_string_here',
-                'number_of_shards': '5', 'creation_date': '1456963200172',
+                'number_of_replicas': '1',
+                'uuid': 'random_uuid_string_here',
+                'number_of_shards': '5',
+                'creation_date': '1456963200172',
                 'routing': {'allocation': {'include': {'tag': 'foo'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     },
     'b-2016.03.04': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'another_random_uuid_string',
-                'number_of_shards': '5', 'creation_date': '1457049600812',
+                'number_of_replicas': '1',
+                'uuid': 'another_random_uuid_string',
+                'number_of_shards': '5',
+                'creation_date': '1457049600812',
                 'routing': {'allocation': {'include': {'tag': 'bar'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     },
     'c-2016.03.05': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'random_uuid_string_here',
-                'number_of_shards': '5', 'creation_date': '1457136000933',
+                'number_of_replicas': '1',
+                'uuid': 'random_uuid_string_here',
+                'number_of_shards': '5',
+                'creation_date': '1457136000933',
                 'routing': {'allocation': {'include': {'tag': 'foo'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     },
     'd-2016.03.06': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'another_random_uuid_string',
-                'number_of_shards': '5', 'creation_date': '1457222400527',
+                'number_of_replicas': '1',
+                'uuid': 'another_random_uuid_string',
+                'number_of_shards': '5',
+                'creation_date': '1457222400527',
                 'routing': {'allocation': {'include': {'tag': 'bar'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
-    }
+        },
+    },
 }
 
-state_named  = [
+state_named = [
     {'index': 'index-2015.01.01', 'status': 'open'},
     {'index': 'index-2015.02.01', 'status': 'open'},
 ]
@@ -363,88 +518,94 @@ settings_named = {
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'random_uuid_string_here',
-                'number_of_shards': '5', 'creation_date': '1456963200172',
+                'number_of_replicas': '1',
+                'uuid': 'random_uuid_string_here',
+                'number_of_shards': '5',
+                'creation_date': '1456963200172',
                 'routing': {'allocation': {'include': {'tag': 'foo'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
+        },
     },
     'index-2015.02.01': {
         'aliases': ['my_alias'],
         'mappings': {},
         'settings': {
             'index': {
-                'number_of_replicas': '1', 'uuid': 'another_random_uuid_string',
-                'number_of_shards': '5', 'creation_date': '1457049600812',
+                'number_of_replicas': '1',
+                'uuid': 'another_random_uuid_string',
+                'number_of_shards': '5',
+                'creation_date': '1457049600812',
                 'routing': {'allocation': {'include': {'tag': 'bar'}}},
-                'version': {'created': '2020099'}, 'refresh_interval': '5s'
+                'version': {'created': '2020099'},
+                'refresh_interval': '5s',
             }
-        }
-    }
+        },
+    },
 }
 
-stats_one      = {
+stats_one = {
     'indices': {
-        named_index : {
+        named_index: {
             'total': {
                 'docs': {'count': 6374962, 'deleted': 0},
-                'store': {'size_in_bytes': 1115219663, 'throttle_time_in_millis': 0}
+                'store': {'size_in_bytes': 1115219663, 'throttle_time_in_millis': 0},
             },
             'primaries': {
                 'docs': {'count': 3187481, 'deleted': 0},
-                'store': {'size_in_bytes': 557951789, 'throttle_time_in_millis': 0}
-            }
+                'store': {'size_in_bytes': 557951789, 'throttle_time_in_millis': 0},
+            },
         }
     }
 }
 
-stats_two      = {
+stats_two = {
     'indices': {
         'index-2016.03.03': {
             'total': {
                 'docs': {'count': 6374962, 'deleted': 0},
-                'store': {'size_in_bytes': 1115219663, 'throttle_time_in_millis': 0}
+                'store': {'size_in_bytes': 1115219663, 'throttle_time_in_millis': 0},
             },
             'primaries': {
                 'docs': {'count': 3187481, 'deleted': 0},
-                'store': {'size_in_bytes': 557951789, 'throttle_time_in_millis': 0}
-            }
+                'store': {'size_in_bytes': 557951789, 'throttle_time_in_millis': 0},
+            },
         },
         'index-2016.03.04': {
             'total': {
                 'docs': {'count': 6377544, 'deleted': 0},
-                'store': {'size_in_bytes': 1120891046, 'throttle_time_in_millis': 0}
+                'store': {'size_in_bytes': 1120891046, 'throttle_time_in_millis': 0},
             },
             'primaries': {
                 'docs': {'count': 3188772, 'deleted': 0},
-                'store': {'size_in_bytes': 560677114, 'throttle_time_in_millis': 0}
-            }
-        }
+                'store': {'size_in_bytes': 560677114, 'throttle_time_in_millis': 0},
+            },
+        },
     }
 }
 
-stats_four      = {
+stats_four = {
     'indices': {
         'a-2016.03.03': {
             'total': {
                 'docs': {'count': 6374962, 'deleted': 0},
-                'store': {'size_in_bytes': 1115219663, 'throttle_time_in_millis': 0}
+                'store': {'size_in_bytes': 1115219663, 'throttle_time_in_millis': 0},
             },
             'primaries': {
                 'docs': {'count': 3187481, 'deleted': 0},
-                'store': {'size_in_bytes': 557951789, 'throttle_time_in_millis': 0}
-            }
+                'store': {'size_in_bytes': 557951789, 'throttle_time_in_millis': 0},
+            },
         },
         'b-2016.03.04': {
             'total': {
                 'docs': {'count': 6377544, 'deleted': 0},
-                'store': {'size_in_bytes': 1120891046, 'throttle_time_in_millis': 0}
+                'store': {'size_in_bytes': 1120891046, 'throttle_time_in_millis': 0},
             },
             'primaries': {
                 'docs': {'count': 3188772, 'deleted': 0},
-                'store': {'size_in_bytes': 560677114, 'throttle_time_in_millis': 0}
-            }
+                'store': {'size_in_bytes': 560677114, 'throttle_time_in_millis': 0},
+            },
         },
         # CLOSED, ergo, not present
         # 'c-2016.03.05': {
@@ -460,29 +621,35 @@ stats_four      = {
         'd-2016.03.06': {
             'total': {
                 'docs': {'count': 6266436, 'deleted': 0},
-                'store': {'size_in_bytes': 1120882168, 'throttle_time_in_millis': 0}
+                'store': {'size_in_bytes': 1120882168, 'throttle_time_in_millis': 0},
             },
             'primaries': {
                 'docs': {'count': 3133218, 'deleted': 0},
-                'store': {'size_in_bytes': 560441084, 'throttle_time_in_millis': 0}
-            }
-        }
-
+                'store': {'size_in_bytes': 560441084, 'throttle_time_in_millis': 0},
+            },
+        },
     }
 }
 
 fieldstats_one = {
     'indices': {
-        named_index : {
+        named_index: {
             'fields': {
                 'timestamp': {
                     'density': 100,
                     'min_value_as_string': '2016-03-03T00:00:06.189Z',
-                    'max_value': 1457049599152, 'max_doc': 415651,
-                    'min_value': 1456963206189, 'doc_count': 415651,
+                    'max_value': 1457049599152,
+                    'max_doc': 415651,
+                    'min_value': 1456963206189,
+                    'doc_count': 415651,
                     'max_value_as_string': '2016-03-03T23:59:59.152Z',
-                    'sum_total_term_freq': -1, 'sum_doc_freq': 1662604}}}}
+                    'sum_total_term_freq': -1,
+                    'sum_doc_freq': 1662604,
+                }
+            }
+        }
     }
+}
 
 fieldstats_two = {
     'indices': {
@@ -491,19 +658,31 @@ fieldstats_two = {
                 'timestamp': {
                     'density': 100,
                     'min_value_as_string': '2016-03-03T00:00:06.189Z',
-                    'max_value': 1457049599152, 'max_doc': 415651,
-                    'min_value': 1456963206189, 'doc_count': 415651,
+                    'max_value': 1457049599152,
+                    'max_doc': 415651,
+                    'min_value': 1456963206189,
+                    'doc_count': 415651,
                     'max_value_as_string': '2016-03-03T23:59:59.152Z',
-                    'sum_total_term_freq': -1, 'sum_doc_freq': 1662604}}},
+                    'sum_total_term_freq': -1,
+                    'sum_doc_freq': 1662604,
+                }
+            }
+        },
         'index-2016.03.04': {
             'fields': {
                 'timestamp': {
                     'density': 100,
                     'min_value_as_string': '2016-03-04T00:00:00.812Z',
-                    'max_value': 1457135999223, 'max_doc': 426762,
-                    'min_value': 1457049600812, 'doc_count': 426762,
+                    'max_value': 1457135999223,
+                    'max_doc': 426762,
+                    'min_value': 1457049600812,
+                    'doc_count': 426762,
                     'max_value_as_string': '2016-03-04T23:59:59.223Z',
-                    'sum_total_term_freq': -1, 'sum_doc_freq': 1673715}}},
+                    'sum_total_term_freq': -1,
+                    'sum_doc_freq': 1673715,
+                }
+            }
+        },
     }
 }
 
@@ -514,59 +693,90 @@ fieldstats_four = {
                 'timestamp': {
                     'density': 100,
                     'min_value_as_string': '2016-03-03T00:00:06.189Z',
-                    'max_value': 1457049599152, 'max_doc': 415651,
-                    'min_value': 1456963206189, 'doc_count': 415651,
+                    'max_value': 1457049599152,
+                    'max_doc': 415651,
+                    'min_value': 1456963206189,
+                    'doc_count': 415651,
                     'max_value_as_string': '2016-03-03T23:59:59.152Z',
-                    'sum_total_term_freq': -1, 'sum_doc_freq': 1662604}}},
+                    'sum_total_term_freq': -1,
+                    'sum_doc_freq': 1662604,
+                }
+            }
+        },
         'b-2016.03.04': {
             'fields': {
                 'timestamp': {
                     'density': 100,
                     'min_value_as_string': '2016-03-04T00:00:00.812Z',
-                    'max_value': 1457135999223, 'max_doc': 426762,
-                    'min_value': 1457049600812, 'doc_count': 426762,
+                    'max_value': 1457135999223,
+                    'max_doc': 426762,
+                    'min_value': 1457049600812,
+                    'doc_count': 426762,
                     'max_value_as_string': '2016-03-04T23:59:59.223Z',
-                    'sum_total_term_freq': -1, 'sum_doc_freq': 1673715}}},
+                    'sum_total_term_freq': -1,
+                    'sum_doc_freq': 1673715,
+                }
+            }
+        },
         'd-2016.03.06': {
             'fields': {
                 'timestamp': {
                     'density': 100,
                     'min_value_as_string': '2016-03-04T00:00:00.812Z',
-                    'max_value': 1457308799223, 'max_doc': 426762,
-                    'min_value': 1457222400567, 'doc_count': 426762,
+                    'max_value': 1457308799223,
+                    'max_doc': 426762,
+                    'min_value': 1457222400567,
+                    'doc_count': 426762,
                     'max_value_as_string': '2016-03-04T23:59:59.223Z',
-                    'sum_total_term_freq': -1, 'sum_doc_freq': 1673715}}},
+                    'sum_total_term_freq': -1,
+                    'sum_doc_freq': 1673715,
+                }
+            }
+        },
     }
 }
 
 fieldstats_query = {
     'aggregations': {
-        'min' : {
+        'min': {
             'value_as_string': '2016-03-03T00:00:06.189Z',
             'value': 1456963206189,
         },
-        'max' : {
+        'max': {
             'value': 1457049599152,
             'value_as_string': '2016-03-03T23:59:59.152Z',
+        },
+    }
+}
+
+shards = {
+    'indices': {
+        named_index: {
+            'shards': {
+                '0': [{'num_search_segments': 15}, {'num_search_segments': 21}],
+                '1': [{'num_search_segments': 19}, {'num_search_segments': 16}],
+            }
+        }
+    }
+}
+fm_shards = {
+    'indices': {
+        named_index: {
+            'shards': {
+                '0': [{'num_search_segments': 1}, {'num_search_segments': 1}],
+                '1': [{'num_search_segments': 1}, {'num_search_segments': 1}],
+            }
         }
     }
 }
 
-shards         = { 'indices': { named_index: { 'shards': {
-        '0': [ { 'num_search_segments' : 15 }, { 'num_search_segments' : 21 } ],
-        '1': [ { 'num_search_segments' : 19 }, { 'num_search_segments' : 16 } ] }}}}
-fm_shards      = { 'indices': { named_index: { 'shards': {
-        '0': [ { 'num_search_segments' : 1 }, { 'num_search_segments' : 1 } ],
-        '1': [ { 'num_search_segments' : 1 }, { 'num_search_segments' : 1 } ] }}}}
-
-loginfo        =    {   "loglevel": "INFO",
-                        "logfile": None,
-                        "logformat": "default"
-                    }
+loginfo = {"loglevel": "INFO", "logfile": None, "logformat": "default"}
 default_format = '%(asctime)s %(levelname)-9s %(message)s'
-debug_format   = '%(asctime)s %(levelname)-9s %(name)22s %(funcName)22s:%(lineno)-4d %(message)s'
+debug_format = (
+    '%(asctime)s %(levelname)-9s %(name)22s %(funcName)22s:%(lineno)-4d %(message)s'
+)
 
-yamlconfig     = '''
+yamlconfig = '''
 ---
 # Remember, leave a key empty to use the default value.  None will be a string,
 # not a Python "NoneType"
@@ -586,7 +796,7 @@ options:
   logformat: default
   quiet: False
 '''
-pattern_ft     = '''
+pattern_ft = '''
 ---
 actions:
   1:
@@ -601,7 +811,7 @@ actions:
         value: a
         exclude: False
 '''
-age_ft         = '''
+age_ft = '''
 ---
 actions:
   1:
@@ -619,7 +829,7 @@ actions:
         unit_count: 0
         epoch: 1456963201
 '''
-space_ft         = '''
+space_ft = '''
 ---
 actions:
   1:
@@ -635,7 +845,7 @@ actions:
         use_age: True
         timestring: '%Y.%m.%d'
 '''
-forcemerge_ft  = '''
+forcemerge_ft = '''
 ---
 actions:
   1:
@@ -648,7 +858,7 @@ actions:
       - filtertype: forcemerged
         max_num_segments: 2
 '''
-allocated_ft   = '''
+allocated_ft = '''
 ---
 actions:
   1:
@@ -663,7 +873,7 @@ actions:
         value: foo
         allocation_type: include
 '''
-kibana_ft      = '''
+kibana_ft = '''
 ---
 actions:
   1:
@@ -675,7 +885,7 @@ actions:
     filters:
       - filtertype: kibana
 '''
-opened_ft      = '''
+opened_ft = '''
 ---
 actions:
   1:
@@ -687,7 +897,7 @@ actions:
     filters:
       - filtertype: opened
 '''
-closed_ft     = '''
+closed_ft = '''
 ---
 actions:
   1:
@@ -699,7 +909,7 @@ actions:
     filters:
       - filtertype: closed
 '''
-none_ft        = '''
+none_ft = '''
 ---
 actions:
   1:
@@ -711,7 +921,7 @@ actions:
     filters:
       - filtertype: none
 '''
-invalid_ft     = '''
+invalid_ft = '''
 ---
 actions:
   1:
@@ -723,7 +933,7 @@ actions:
     filters:
       - filtertype: sir_not_appearing_in_this_film
 '''
-snap_age_ft    = '''
+snap_age_ft = '''
 ---
 actions:
   1:
@@ -738,7 +948,7 @@ actions:
         unit: days
         unit_count: 1
 '''
-snap_pattern_ft= '''
+snap_pattern_ft = '''
 ---
 actions:
   1:
@@ -752,7 +962,7 @@ actions:
         kind: prefix
         value: sna
 '''
-snap_none_ft  = '''
+snap_none_ft = '''
 ---
 actions:
   1:
@@ -764,7 +974,7 @@ actions:
     filters:
       - filtertype: none
 '''
-size_ft     = '''
+size_ft = '''
 ---
 actions:
   1:
@@ -781,25 +991,709 @@ actions:
 '''
 
 generic_task = {'task': 'I0ekFjMhSPCQz7FUs1zJOg:54510686'}
-incomplete_task = {'completed': False, 'task': {'node': 'I0ekFjMhSPCQz7FUs1zJOg', 'status': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 3647, 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0, 'created': 3646581, 'deleted': 0, 'requests_per_second': -1.0, 'version_conflicts': 0, 'total': 3646581}, 'description': 'UNIT TEST', 'running_time_in_nanos': 1637039537721, 'cancellable': True, 'action': 'indices:data/write/reindex', 'type': 'transport', 'id': 54510686, 'start_time_in_millis': 1489695981997}, 'response': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 3647, 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0, 'created': 3646581, 'deleted': 0, 'took': 1636917, 'requests_per_second': -1.0, 'timed_out': False, 'failures': [], 'version_conflicts': 0, 'total': 3646581}}
-completed_task = {'completed': True, 'task': {'node': 'I0ekFjMhSPCQz7FUs1zJOg', 'status': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 3647, 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0, 'created': 3646581, 'deleted': 0, 'requests_per_second': -1.0, 'version_conflicts': 0, 'total': 3646581}, 'description': 'UNIT TEST', 'running_time_in_nanos': 1637039537721, 'cancellable': True, 'action': 'indices:data/write/reindex', 'type': 'transport', 'id': 54510686, 'start_time_in_millis': 1489695981997}, 'response': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 3647, 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0, 'created': 3646581, 'deleted': 0, 'took': 1636917, 'requests_per_second': -1.0, 'timed_out': False, 'failures': [], 'version_conflicts': 0, 'total': 3646581}}
-completed_task_zero_total = {'completed': True, 'task': {'node': 'I0ekFjMhSPCQz7FUs1zJOg', 'status': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 0, 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0, 'created': 0, 'deleted': 0, 'requests_per_second': -1.0, 'version_conflicts': 0, 'total': 0}, 'description': 'UNIT TEST', 'running_time_in_nanos': 1637039537721, 'cancellable': True, 'action': 'indices:data/write/reindex', 'type': 'transport', 'id': 54510686, 'start_time_in_millis': 1489695981997}, 'response': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 0, 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0, 'created': 0, 'deleted': 0, 'took': 1636917, 'requests_per_second': -1.0, 'timed_out': False, 'failures': [], 'version_conflicts': 0, 'total': 0}}
-recovery_output = {'index-2015.01.01': {'shards' : [{'stage':'DONE'}]}, 'index-2015.02.01': {'shards' : [{'stage':'DONE'}]}}
-unrecovered_output = {'index-2015.01.01': {'shards' : [{'stage':'INDEX'}]}, 'index-2015.02.01': {'shards' : [{'stage':'INDEX'}]}}
-cluster_health = { "cluster_name": "unit_test", "status": "green", "timed_out": False, "number_of_nodes": 7, "number_of_data_nodes": 3, "active_primary_shards": 235, "active_shards": 471, "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0,  "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100}
-reindex_basic = { 'source': { 'index': named_index }, 'dest': { 'index': 'other_index' } }
-reindex_replace = { 'source': { 'index': 'REINDEX_SELECTION' }, 'dest': { 'index': 'other_index' } }
-reindex_migration = { 'source': { 'index': named_index }, 'dest': { 'index': 'MIGRATION' } }
+incomplete_task = {
+    'completed': False,
+    'task': {
+        'node': 'I0ekFjMhSPCQz7FUs1zJOg',
+        'status': {
+            'retries': {'bulk': 0, 'search': 0},
+            'updated': 0,
+            'batches': 3647,
+            'throttled_until_millis': 0,
+            'throttled_millis': 0,
+            'noops': 0,
+            'created': 3646581,
+            'deleted': 0,
+            'requests_per_second': -1.0,
+            'version_conflicts': 0,
+            'total': 3646581,
+        },
+        'description': 'UNIT TEST',
+        'running_time_in_nanos': 1637039537721,
+        'cancellable': True,
+        'action': 'indices:data/write/reindex',
+        'type': 'transport',
+        'id': 54510686,
+        'start_time_in_millis': 1489695981997,
+    },
+    'response': {
+        'retries': {'bulk': 0, 'search': 0},
+        'updated': 0,
+        'batches': 3647,
+        'throttled_until_millis': 0,
+        'throttled_millis': 0,
+        'noops': 0,
+        'created': 3646581,
+        'deleted': 0,
+        'took': 1636917,
+        'requests_per_second': -1.0,
+        'timed_out': False,
+        'failures': [],
+        'version_conflicts': 0,
+        'total': 3646581,
+    },
+}
+completed_task = {
+    'completed': True,
+    'task': {
+        'node': 'I0ekFjMhSPCQz7FUs1zJOg',
+        'status': {
+            'retries': {'bulk': 0, 'search': 0},
+            'updated': 0,
+            'batches': 3647,
+            'throttled_until_millis': 0,
+            'throttled_millis': 0,
+            'noops': 0,
+            'created': 3646581,
+            'deleted': 0,
+            'requests_per_second': -1.0,
+            'version_conflicts': 0,
+            'total': 3646581,
+        },
+        'description': 'UNIT TEST',
+        'running_time_in_nanos': 1637039537721,
+        'cancellable': True,
+        'action': 'indices:data/write/reindex',
+        'type': 'transport',
+        'id': 54510686,
+        'start_time_in_millis': 1489695981997,
+    },
+    'response': {
+        'retries': {'bulk': 0, 'search': 0},
+        'updated': 0,
+        'batches': 3647,
+        'throttled_until_millis': 0,
+        'throttled_millis': 0,
+        'noops': 0,
+        'created': 3646581,
+        'deleted': 0,
+        'took': 1636917,
+        'requests_per_second': -1.0,
+        'timed_out': False,
+        'failures': [],
+        'version_conflicts': 0,
+        'total': 3646581,
+    },
+}
+completed_task_zero_total = {
+    'completed': True,
+    'task': {
+        'node': 'I0ekFjMhSPCQz7FUs1zJOg',
+        'status': {
+            'retries': {'bulk': 0, 'search': 0},
+            'updated': 0,
+            'batches': 0,
+            'throttled_until_millis': 0,
+            'throttled_millis': 0,
+            'noops': 0,
+            'created': 0,
+            'deleted': 0,
+            'requests_per_second': -1.0,
+            'version_conflicts': 0,
+            'total': 0,
+        },
+        'description': 'UNIT TEST',
+        'running_time_in_nanos': 1637039537721,
+        'cancellable': True,
+        'action': 'indices:data/write/reindex',
+        'type': 'transport',
+        'id': 54510686,
+        'start_time_in_millis': 1489695981997,
+    },
+    'response': {
+        'retries': {'bulk': 0, 'search': 0},
+        'updated': 0,
+        'batches': 0,
+        'throttled_until_millis': 0,
+        'throttled_millis': 0,
+        'noops': 0,
+        'created': 0,
+        'deleted': 0,
+        'took': 1636917,
+        'requests_per_second': -1.0,
+        'timed_out': False,
+        'failures': [],
+        'version_conflicts': 0,
+        'total': 0,
+    },
+}
+recovery_output = {
+    'index-2015.01.01': {'shards': [{'stage': 'DONE'}]},
+    'index-2015.02.01': {'shards': [{'stage': 'DONE'}]},
+}
+unrecovered_output = {
+    'index-2015.01.01': {'shards': [{'stage': 'INDEX'}]},
+    'index-2015.02.01': {'shards': [{'stage': 'INDEX'}]},
+}
+cluster_health = {
+    "cluster_name": "unit_test",
+    "status": "green",
+    "timed_out": False,
+    "number_of_nodes": 7,
+    "number_of_data_nodes": 3,
+    "active_primary_shards": 235,
+    "active_shards": 471,
+    "relocating_shards": 0,
+    "initializing_shards": 0,
+    "unassigned_shards": 0,
+    "delayed_unassigned_shards": 0,
+    "number_of_pending_tasks": 0,
+    "task_max_waiting_in_queue_millis": 0,
+    "active_shards_percent_as_number": 100,
+}
+reindex_basic = {'source': {'index': named_index}, 'dest': {'index': 'other_index'}}
+reindex_replace = {
+    'source': {'index': 'REINDEX_SELECTION'},
+    'dest': {'index': 'other_index'},
+}
+reindex_migration = {'source': {'index': named_index}, 'dest': {'index': 'MIGRATION'}}
 index_list_966 = ['indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d']
-recovery_966 = {'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d': {'shards': [{'total_time': '10.1m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10.1m', 'target_throttle_time': '-1', 'total_time_in_millis': 606577, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3171596177, 'reused': '0b', 'total_in_bytes': 3171596177, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '45ms', 'percent': '100.0%', 'total_time_in_millis': 45, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T11:54:48.183Z', 'primary': True, 'total_time_in_millis': 606631, 'stop_time_in_millis': 1494936294815, 'stop_time': '2017-05-16T12:04:54.815Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 1, 'start_time_in_millis': 1494935688183}, {'total_time': '10m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10m', 'target_throttle_time': '-1', 'total_time_in_millis': 602302, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3162299781, 'reused': '0b', 'total_in_bytes': 3162299781, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '389ms', 'percent': '100.0%', 'total_time_in_millis': 389, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T12:04:51.606Z', 'primary': True, 'total_time_in_millis': 602698, 'stop_time_in_millis': 1494936894305, 'stop_time': '2017-05-16T12:14:54.305Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 5, 'start_time_in_millis': 1494936291606}, {'total_time': '10.1m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10.1m', 'target_throttle_time': '-1', 'total_time_in_millis': 606692, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3156050994, 'reused': '0b', 'total_in_bytes': 3156050994, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '38ms', 'percent': '100.0%', 'total_time_in_millis': 38, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T11:54:48.166Z', 'primary': True, 'total_time_in_millis': 606737, 'stop_time_in_millis': 1494936294904, 'stop_time': '2017-05-16T12:04:54.904Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 3, 'start_time_in_millis': 1494935688166}, {'total_time': '10m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10m', 'target_throttle_time': '-1', 'total_time_in_millis': 602010, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3153017440, 'reused': '0b', 'total_in_bytes': 3153017440, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '558ms', 'percent': '100.0%', 'total_time_in_millis': 558, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T12:04:51.369Z', 'primary': True, 'total_time_in_millis': 602575, 'stop_time_in_millis': 1494936893944, 'stop_time': '2017-05-16T12:14:53.944Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 4, 'start_time_in_millis': 1494936291369}, {'total_time': '10m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10m', 'target_throttle_time': '-1', 'total_time_in_millis': 600492, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3153347402, 'reused': '0b', 'total_in_bytes': 3153347402, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '445ms', 'percent': '100.0%', 'total_time_in_millis': 445, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T12:04:54.817Z', 'primary': True, 'total_time_in_millis': 600946, 'stop_time_in_millis': 1494936895764, 'stop_time': '2017-05-16T12:14:55.764Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 6, 'start_time_in_millis': 1494936294817}, {'total_time': '10m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10m', 'target_throttle_time': '-1', 'total_time_in_millis': 603194, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3148003580, 'reused': '0b', 'total_in_bytes': 3148003580, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '225ms', 'percent': '100.0%', 'total_time_in_millis': 225, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T11:54:48.173Z', 'primary': True, 'total_time_in_millis': 603429, 'stop_time_in_millis': 1494936291602, 'stop_time': '2017-05-16T12:04:51.602Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 2, 'start_time_in_millis': 1494935688173}, {'total_time': '10m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10m', 'target_throttle_time': '-1', 'total_time_in_millis': 601453, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3168132171, 'reused': '0b', 'total_in_bytes': 3168132171, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '43ms', 'percent': '100.0%', 'total_time_in_millis': 43, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T12:04:54.905Z', 'primary': True, 'total_time_in_millis': 601503, 'stop_time_in_millis': 1494936896408, 'stop_time': '2017-05-16T12:14:56.408Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 7, 'start_time_in_millis': 1494936294905}, {'total_time': '10m', 'index': {'files': {'reused': 0, 'total': 15, 'percent': '100.0%', 'recovered': 15}, 'total_time': '10m', 'target_throttle_time': '-1', 'total_time_in_millis': 602897, 'source_throttle_time_in_millis': 0, 'source_throttle_time': '-1', 'target_throttle_time_in_millis': 0, 'size': {'recovered_in_bytes': 3153750393, 'reused': '0b', 'total_in_bytes': 3153750393, 'percent': '100.0%', 'reused_in_bytes': 0, 'total': '2.9gb', 'recovered': '2.9gb'}}, 'verify_index': {'total_time': '0s', 'total_time_in_millis': 0, 'check_index_time_in_millis': 0, 'check_index_time': '0s'}, 'target': {'ip': 'x.x.x.7', 'host': 'x.x.x.7', 'transport_address': 'x.x.x.7:9300', 'id': 'K4xQPaOFSWSPLwhb0P47aQ', 'name': 'staging-es5-forcem'}, 'source': {'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', 'version': '5.1.1', 'snapshot': 'force-merge', 'repository': 'force-merge'}, 'translog': {'total_time': '271ms', 'percent': '100.0%', 'total_time_in_millis': 271, 'total_on_start': 0, 'total': 0, 'recovered': 0}, 'start_time': '2017-05-16T11:54:48.191Z', 'primary': True, 'total_time_in_millis': 603174, 'stop_time_in_millis': 1494936291366, 'stop_time': '2017-05-16T12:04:51.366Z', 'stage': 'DONE', 'type': 'SNAPSHOT', 'id': 0, 'start_time_in_millis': 1494935688191}]}}
-no_snap_tasks = {'nodes': {'node1': {'tasks': {'task1': {'action': 'cluster:monitor/tasks/lists[n]'}}}}}
-snap_task = {'nodes': {'node1': {'tasks': {'task1': {'action': 'cluster:admin/snapshot/delete'}}}}}
-watermark_persistent = {'persistent':{'cluster':{'routing':{'allocation':{'disk':{'watermark':{'low':'11%','high':'60gb'}}}}}}}
-watermark_transient = {'transient':{'cluster':{'routing':{'allocation':{'disk':{'watermark':{'low':'9%','high':'50gb'}}}}}}}
+recovery_966 = {
+    'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d': {
+        'shards': [
+            {
+                'total_time': '10.1m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10.1m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 606577,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3171596177,
+                        'reused': '0b',
+                        'total_in_bytes': 3171596177,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '45ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 45,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T11:54:48.183Z',
+                'primary': True,
+                'total_time_in_millis': 606631,
+                'stop_time_in_millis': 1494936294815,
+                'stop_time': '2017-05-16T12:04:54.815Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 1,
+                'start_time_in_millis': 1494935688183,
+            },
+            {
+                'total_time': '10m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 602302,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3162299781,
+                        'reused': '0b',
+                        'total_in_bytes': 3162299781,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '389ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 389,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T12:04:51.606Z',
+                'primary': True,
+                'total_time_in_millis': 602698,
+                'stop_time_in_millis': 1494936894305,
+                'stop_time': '2017-05-16T12:14:54.305Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 5,
+                'start_time_in_millis': 1494936291606,
+            },
+            {
+                'total_time': '10.1m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10.1m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 606692,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3156050994,
+                        'reused': '0b',
+                        'total_in_bytes': 3156050994,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '38ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 38,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T11:54:48.166Z',
+                'primary': True,
+                'total_time_in_millis': 606737,
+                'stop_time_in_millis': 1494936294904,
+                'stop_time': '2017-05-16T12:04:54.904Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 3,
+                'start_time_in_millis': 1494935688166,
+            },
+            {
+                'total_time': '10m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 602010,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3153017440,
+                        'reused': '0b',
+                        'total_in_bytes': 3153017440,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '558ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 558,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T12:04:51.369Z',
+                'primary': True,
+                'total_time_in_millis': 602575,
+                'stop_time_in_millis': 1494936893944,
+                'stop_time': '2017-05-16T12:14:53.944Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 4,
+                'start_time_in_millis': 1494936291369,
+            },
+            {
+                'total_time': '10m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 600492,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3153347402,
+                        'reused': '0b',
+                        'total_in_bytes': 3153347402,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '445ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 445,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T12:04:54.817Z',
+                'primary': True,
+                'total_time_in_millis': 600946,
+                'stop_time_in_millis': 1494936895764,
+                'stop_time': '2017-05-16T12:14:55.764Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 6,
+                'start_time_in_millis': 1494936294817,
+            },
+            {
+                'total_time': '10m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 603194,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3148003580,
+                        'reused': '0b',
+                        'total_in_bytes': 3148003580,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '225ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 225,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T11:54:48.173Z',
+                'primary': True,
+                'total_time_in_millis': 603429,
+                'stop_time_in_millis': 1494936291602,
+                'stop_time': '2017-05-16T12:04:51.602Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 2,
+                'start_time_in_millis': 1494935688173,
+            },
+            {
+                'total_time': '10m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 601453,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3168132171,
+                        'reused': '0b',
+                        'total_in_bytes': 3168132171,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '43ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 43,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T12:04:54.905Z',
+                'primary': True,
+                'total_time_in_millis': 601503,
+                'stop_time_in_millis': 1494936896408,
+                'stop_time': '2017-05-16T12:14:56.408Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 7,
+                'start_time_in_millis': 1494936294905,
+            },
+            {
+                'total_time': '10m',
+                'index': {
+                    'files': {
+                        'reused': 0,
+                        'total': 15,
+                        'percent': '100.0%',
+                        'recovered': 15,
+                    },
+                    'total_time': '10m',
+                    'target_throttle_time': '-1',
+                    'total_time_in_millis': 602897,
+                    'source_throttle_time_in_millis': 0,
+                    'source_throttle_time': '-1',
+                    'target_throttle_time_in_millis': 0,
+                    'size': {
+                        'recovered_in_bytes': 3153750393,
+                        'reused': '0b',
+                        'total_in_bytes': 3153750393,
+                        'percent': '100.0%',
+                        'reused_in_bytes': 0,
+                        'total': '2.9gb',
+                        'recovered': '2.9gb',
+                    },
+                },
+                'verify_index': {
+                    'total_time': '0s',
+                    'total_time_in_millis': 0,
+                    'check_index_time_in_millis': 0,
+                    'check_index_time': '0s',
+                },
+                'target': {
+                    'ip': 'x.x.x.7',
+                    'host': 'x.x.x.7',
+                    'transport_address': 'x.x.x.7:9300',
+                    'id': 'K4xQPaOFSWSPLwhb0P47aQ',
+                    'name': 'staging-es5-forcem',
+                },
+                'source': {
+                    'index': 'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d',
+                    'version': '5.1.1',
+                    'snapshot': 'force-merge',
+                    'repository': 'force-merge',
+                },
+                'translog': {
+                    'total_time': '271ms',
+                    'percent': '100.0%',
+                    'total_time_in_millis': 271,
+                    'total_on_start': 0,
+                    'total': 0,
+                    'recovered': 0,
+                },
+                'start_time': '2017-05-16T11:54:48.191Z',
+                'primary': True,
+                'total_time_in_millis': 603174,
+                'stop_time_in_millis': 1494936291366,
+                'stop_time': '2017-05-16T12:04:51.366Z',
+                'stage': 'DONE',
+                'type': 'SNAPSHOT',
+                'id': 0,
+                'start_time_in_millis': 1494935688191,
+            },
+        ]
+    }
+}
+no_snap_tasks = {
+    'nodes': {
+        'node1': {'tasks': {'task1': {'action': 'cluster:monitor/tasks/lists[n]'}}}
+    }
+}
+snap_task = {
+    'nodes': {
+        'node1': {'tasks': {'task1': {'action': 'cluster:admin/snapshot/delete'}}}
+    }
+}
+watermark_persistent = {
+    'persistent': {
+        'cluster': {
+            'routing': {
+                'allocation': {'disk': {'watermark': {'low': '11%', 'high': '60gb'}}}
+            }
+        }
+    }
+}
+watermark_transient = {
+    'transient': {
+        'cluster': {
+            'routing': {
+                'allocation': {'disk': {'watermark': {'low': '9%', 'high': '50gb'}}}
+            }
+        }
+    }
+}
 watermark_both = {
-    'persistent': {'cluster':{'routing':{'allocation':{'disk':{'watermark':{'low':'11%','high':'60gb'}}}}}},
-    'transient': {'cluster':{'routing':{'allocation':{'disk':{'watermark':{'low':'9%','high':'50gb'}}}}}},
+    'persistent': {
+        'cluster': {
+            'routing': {
+                'allocation': {'disk': {'watermark': {'low': '11%', 'high': '60gb'}}}
+            }
+        }
+    },
+    'transient': {
+        'cluster': {
+            'routing': {
+                'allocation': {'disk': {'watermark': {'low': '9%', 'high': '50gb'}}}
+            }
+        }
+    },
 }
-empty_cluster_settings = {'persistent':{},'transient':{}}
+empty_cluster_settings = {'persistent': {}, 'transient': {}}
 data_only_node_role = ['data']
-master_data_node_role = ['data','master']
+master_data_node_role = ['data', 'master']
