diff -pruN 4.3.0-3/.coveragerc 4.4.0-1/.coveragerc
--- 4.3.0-3/.coveragerc	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/.coveragerc	2025-12-24 23:39:20.000000000 +0000
@@ -1,3 +1,2 @@
 [run]
-parallel=True
 source=pgcli
diff -pruN 4.3.0-3/.github/PULL_REQUEST_TEMPLATE.md 4.4.0-1/.github/PULL_REQUEST_TEMPLATE.md
--- 4.3.0-3/.github/PULL_REQUEST_TEMPLATE.md	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/.github/PULL_REQUEST_TEMPLATE.md	2025-12-24 23:39:20.000000000 +0000
@@ -8,5 +8,5 @@
 - [ ] I've added this contribution to the `changelog.rst`.
 - [ ] I've added my name to the `AUTHORS` file (or it's already there).
 <!-- We would appreciate if you comply with our code style guidelines. -->
-- [ ] I installed pre-commit hooks (`pip install pre-commit && pre-commit install`), and ran `black` on my code.
+- [ ] I installed pre-commit hooks (`pip install pre-commit && pre-commit install`).
 - [x] Please squash merge this pull request (uncheck if you'd like us to merge as multiple commits)
diff -pruN 4.3.0-3/.github/workflows/ci.yml 4.4.0-1/.github/workflows/ci.yml
--- 4.3.0-3/.github/workflows/ci.yml	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/.github/workflows/ci.yml	2025-12-24 23:39:20.000000000 +0000
@@ -18,7 +18,7 @@ jobs:
 
     services:
       postgres:
-        image: postgres:9.6
+        image: postgres:10
         env:
           POSTGRES_USER: postgres
           POSTGRES_PASSWORD: postgres
@@ -31,10 +31,14 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 # v6.0.0
+        with:
+          version: "latest"
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: ${{ matrix.python-version }}
 
@@ -68,32 +72,21 @@ jobs:
             psql -h localhost -U postgres -p 6432 pgbouncer -c 'show help'
 
       - name: Install requirements
-        run: |
-          pip install -U pip setuptools
-          pip install --no-cache-dir ".[sshtunnel]"
-          pip install -r requirements-dev.txt
-          pip install keyrings.alt>=3.1
+        run: uv sync --all-extras -p ${{ matrix.python-version }}
 
       - name: Run unit tests
-        run: coverage run --source pgcli -m pytest
+        run: uv run tox -e py${{ matrix.python-version }}
 
-      # - name: Run integration tests
-      #   env:
-      #       PGUSER: postgres
-      #       PGPASSWORD: postgres
-      #       TERM: xterm
+      - name: Run integration tests
+        env:
+            PGUSER: postgres
+            PGPASSWORD: postgres
+            TERM: xterm
 
-      #   run: behave tests/features --no-capture
+        run: uv run tox -e integration
 
       - name: Check changelog for ReST compliance
-        run: docutils --halt=warning changelog.rst >/dev/null
+        run: uv run tox -e rest
 
-      - name: Run Black
-        run: black --check .
-        if: matrix.python-version == '3.8'
-
-      - name: Coverage
-        run: |
-          coverage combine
-          coverage report
-          codecov
+      - name: Run style checks
+        run: uv run tox -e style
diff -pruN 4.3.0-3/.github/workflows/publish.yml 4.4.0-1/.github/workflows/publish.yml
--- 4.3.0-3/.github/workflows/publish.yml	1970-01-01 00:00:00.000000000 +0000
+++ 4.4.0-1/.github/workflows/publish.yml	2025-12-24 23:39:20.000000000 +0000
@@ -0,0 +1,97 @@
+name: Publish Python Package
+
+on:
+  release:
+    types: [created]
+
+permissions:
+  contents: read
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+
+    services:
+      postgres:
+        image: postgres:10
+        env:
+          POSTGRES_USER: postgres
+          POSTGRES_PASSWORD: postgres
+        ports:
+            - 5432:5432
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 # v6.0.0
+        with:
+          version: "latest"
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: uv sync --all-extras -p ${{ matrix.python-version }}
+
+      - name: Run unit tests
+        env:
+          LANG: en_US.UTF-8
+        run: uv run tox -e py${{ matrix.python-version }}
+
+      - name: Run Style Checks
+        run: uv run tox -e style
+
+  build:
+    runs-on: ubuntu-latest
+    needs: [test]
+
+    steps:
+    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+    - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 # v6.0.0
+      with:
+        version: "latest"
+
+    - name: Set up Python
+      uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
+      with:
+        python-version: '3.13'
+
+    - name: Install dependencies
+      run: uv sync --all-extras -p 3.13
+
+    - name: Build
+      run: uv build
+
+    - name: Store the distribution packages
+      uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+      with:
+        name: python-packages
+        path: dist/
+
+  publish:
+    name: Publish to PyPI
+    runs-on: ubuntu-latest
+    if: startsWith(github.ref, 'refs/tags/')
+    needs: [build]
+    environment: release
+    permissions:
+      id-token: write
+    steps:
+    - name: Download distribution packages
+      uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+      with:
+        name: python-packages
+        path: dist/
+    - name: Publish to PyPI
+      uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4
\ No newline at end of file
diff -pruN 4.3.0-3/.gitignore 4.4.0-1/.gitignore
--- 4.3.0-3/.gitignore	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/.gitignore	2025-12-24 23:39:20.000000000 +0000
@@ -72,4 +72,5 @@ target/
 venv/
 
 .ropeproject/
+uv.lock
 
diff -pruN 4.3.0-3/.pre-commit-config.yaml 4.4.0-1/.pre-commit-config.yaml
--- 4.3.0-3/.pre-commit-config.yaml	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/.pre-commit-config.yaml	2025-12-24 23:39:20.000000000 +0000
@@ -1,5 +1,10 @@
 repos:
--   repo: https://github.com/psf/black
-    rev: 23.3.0
-    hooks:
-    - id: black
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  # Ruff version.
+  rev: v0.11.7
+  hooks:
+    # Run the linter.
+    - id: ruff
+      args: [ --fix ]
+    # Run the formatter.
+    - id: ruff-format
diff -pruN 4.3.0-3/AUTHORS 4.4.0-1/AUTHORS
--- 4.3.0-3/AUTHORS	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/AUTHORS	2025-12-24 23:39:20.000000000 +0000
@@ -141,6 +141,9 @@ Contributors:
     * Josh Lynch (josh-lynch)
     * Fabio (3ximus)
     * Doug Harris (dougharris)
+    * Jay Knight (jay-knight)
+    * fbdb
+    * Charbel Jacquin (charbeljc)
 
 Creator:
 --------
diff -pruN 4.3.0-3/CONTRIBUTING.rst 4.4.0-1/CONTRIBUTING.rst
--- 4.3.0-3/CONTRIBUTING.rst	1970-01-01 00:00:00.000000000 +0000
+++ 4.4.0-1/CONTRIBUTING.rst	2025-12-24 23:39:20.000000000 +0000
@@ -0,0 +1,216 @@
+Development Guide
+-----------------
+This is a guide for developers who would like to contribute to this project.
+
+GitHub Workflow
+---------------
+
+If you're interested in contributing to pgcli, first of all my heart felt
+thanks. `Fork the project <https://github.com/dbcli/pgcli>`_ on github.  Then
+clone your fork into your computer (``git clone <url-for-your-fork>``).  Make
+the changes and create the commits in your local machine. Then push those
+changes to your fork. Then click on the pull request icon on github and create
+a new pull request. Add a description about the change and send it along. I
+promise to review the pull request in a reasonable window of time and get back
+to you.
+
+In order to keep your fork up to date with any changes from mainline, add a new
+git remote to your local copy called 'upstream' and point it to the main pgcli
+repo.
+
+::
+
+   $ git remote add upstream git@github.com:dbcli/pgcli.git
+
+Once the 'upstream' end point is added you can then periodically do a ``git
+pull upstream main`` to update your local copy and then do a ``git push
+origin main`` to keep your own fork up to date.
+
+Check Github's `Understanding the GitHub flow guide
+<https://guides.github.com/introduction/flow/>`_ for a more detailed
+explanation of this process.
+
+Local Setup
+-----------
+
+The installation instructions in the README file are intended for users of
+pgcli. If you're developing pgcli, you'll need to install it in a slightly
+different way so you can see the effects of your changes right away without
+having to go through the install cycle every time you change the code.
+
+Set up `uv <https://docs.astral.sh/uv/getting-started/installation/>`_ for development:
+
+::
+
+    cd pgcli
+    uv venv
+    source ./.venv/bin/activate
+
+Once the virtualenv is activated, install pgcli using pip as follows:
+
+::
+
+    $ uv pip install --editable .
+
+    or
+
+    $ uv pip install -e .
+
+This will install the necessary dependencies as well as install pgcli from the
+working folder into the virtualenv. By installing it using `pip install -e`
+we've linked the pgcli installation with the working copy. Any changes made
+to the code are immediately available in the installed version of pgcli. This
+makes it easy to change something in the code, launch pgcli and check the
+effects of your changes.
+
+Adding PostgreSQL Special (Meta) Commands
+-----------------------------------------
+
+If you want to work on adding new meta-commands (such as `\dp`, `\ds`, `dy`),
+you need to contribute to `pgspecial <https://github.com/dbcli/pgspecial/>`_
+project.
+
+Visual Studio Code Debugging
+-----------------------------
+To set up Visual Studio Code to debug pgcli requires a launch.json file.
+
+Within the project, create a file: .vscode\\launch.json like below.
+
+::
+
+    {
+        // Use IntelliSense to learn about possible attributes.
+        // Hover to view descriptions of existing attributes.
+        // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+        "version": "0.2.0",
+        "configurations": [
+            {
+                "name": "Python: Module",
+                "type": "python",
+                "request": "launch",
+                "module": "pgcli.main",
+                "justMyCode": false,
+                "console": "externalTerminal",
+                "env": {
+                    "PGUSER": "postgres",
+                    "PGPASS": "password",
+                    "PGHOST": "localhost",
+                    "PGPORT": "5432"
+                }
+            }
+        ]
+    }
+
+Building RPM and DEB packages
+-----------------------------
+
+You will need Vagrant 1.7.2 or higher. In the project root there is a
+Vagrantfile that is setup to do multi-vm provisioning. If you're setting things
+up for the first time, then do:
+
+::
+
+    $ version=x.y.z vagrant up debian
+    $ version=x.y.z vagrant up centos
+
+If you already have those VMs setup and you're merely creating a new version of
+DEB or RPM package, then you can do:
+
+::
+
+    $ version=x.y.z vagrant provision
+
+That will create a .deb file and a .rpm file.
+
+The deb package can be installed as follows:
+
+::
+
+    $ sudo dpkg -i pgcli*.deb   # if dependencies are available.
+
+    or
+
+    $ sudo apt-get install -f pgcli*.deb  # if dependencies are not available.
+
+
+The rpm package can be installed as follows:
+
+::
+
+    $ sudo yum install pgcli*.rpm
+
+Running the integration tests
+-----------------------------
+
+Integration tests use `behave package <https://behave.readthedocs.io/>`_ and
+pytest.
+Configuration settings for this package are provided via a ``behave.ini`` file
+in the ``tests`` directory.  An example::
+
+    [behave]
+    stderr_capture = false
+
+    [behave.userdata]
+    pg_test_user = dbuser
+    pg_test_host = db.example.com
+    pg_test_port = 30000
+
+First, install the requirements for testing::
+
+    $ uv pip install ".[dev]"
+
+
+Ensure that the database user has permissions to create and drop test databases
+by checking your ``pg_hba.conf`` file. The default user should be ``postgres``
+at ``localhost``. Make sure the authentication method is set to ``trust``. If
+you made any changes to your ``pg_hba.conf`` make sure to restart the postgres
+service for the changes to take effect.
+
+::
+
+    # ONLY IF YOU MADE CHANGES TO YOUR pg_hba.conf FILE
+    $ sudo service postgresql restart
+
+After that:
+
+::
+
+    $ cd pgcli/tests
+    $ behave
+
+Note that these ``behave`` tests do not currently work when developing on Windows due to pexpect incompatibility.
+
+To see stdout/stderr, use the following command:
+
+::
+
+    $ behave --no-capture
+
+Troubleshooting the integration tests
+-------------------------------------
+
+- Make sure postgres instance on localhost is running
+- Check your ``pg_hba.conf`` file to verify local connections are enabled
+- Check `this issue <https://github.com/dbcli/pgcli/issues/945>`_ for relevant information.
+- `File an issue <https://github.com/dbcli/pgcli/issues/new>`_.
+
+Running the unit tests
+----------------------
+
+The unit tests can be run with pytest:
+
+::
+
+    $ cd pgcli
+    $ pytest
+
+
+Coding Style
+------------
+
+``pgcli`` uses `ruff <https://github.com/astral-sh/ruff>`_ to format the source code.
+
+Releases
+--------
+
+If you're the person responsible for releasing `pgcli`, `this guide <https://github.com/dbcli/pgcli/blob/main/RELEASES.md>`_ is for you.
diff -pruN 4.3.0-3/DEVELOP.rst 4.4.0-1/DEVELOP.rst
--- 4.3.0-3/DEVELOP.rst	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/DEVELOP.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,220 +0,0 @@
-Development Guide
------------------
-This is a guide for developers who would like to contribute to this project.
-
-GitHub Workflow
----------------
-
-If you're interested in contributing to pgcli, first of all my heart felt
-thanks. `Fork the project <https://github.com/dbcli/pgcli>`_ on github.  Then
-clone your fork into your computer (``git clone <url-for-your-fork>``).  Make
-the changes and create the commits in your local machine. Then push those
-changes to your fork. Then click on the pull request icon on github and create
-a new pull request. Add a description about the change and send it along. I
-promise to review the pull request in a reasonable window of time and get back
-to you.
-
-In order to keep your fork up to date with any changes from mainline, add a new
-git remote to your local copy called 'upstream' and point it to the main pgcli
-repo.
-
-::
-
-   $ git remote add upstream git@github.com:dbcli/pgcli.git
-
-Once the 'upstream' end point is added you can then periodically do a ``git
-pull upstream master`` to update your local copy and then do a ``git push
-origin master`` to keep your own fork up to date.
-
-Check Github's `Understanding the GitHub flow guide
-<https://guides.github.com/introduction/flow/>`_ for a more detailed
-explanation of this process.
-
-Local Setup
------------
-
-The installation instructions in the README file are intended for users of
-pgcli. If you're developing pgcli, you'll need to install it in a slightly
-different way so you can see the effects of your changes right away without
-having to go through the install cycle every time you change the code.
-
-It is highly recommended to use virtualenv for development. If you don't know
-what a virtualenv is, `this guide <http://docs.python-guide.org/en/latest/dev/virtualenvs/#virtual-environments>`_
-will help you get started.
-
-Create a virtualenv (let's call it pgcli-dev). Activate it:
-
-::
-
-    source ./pgcli-dev/bin/activate
-
-    or
-
-    .\pgcli-dev\scripts\activate (for Windows)
-
-Once the virtualenv is activated, `cd` into the local clone of pgcli folder
-and install pgcli using pip as follows:
-
-::
-
-    $ pip install --editable .
-
-    or
-
-    $ pip install -e .
-
-This will install the necessary dependencies as well as install pgcli from the
-working folder into the virtualenv. By installing it using `pip install -e`
-we've linked the pgcli installation with the working copy. Any changes made
-to the code are immediately available in the installed version of pgcli. This
-makes it easy to change something in the code, launch pgcli and check the
-effects of your changes.
-
-Adding PostgreSQL Special (Meta) Commands
------------------------------------------
-
-If you want to work on adding new meta-commands (such as `\dp`, `\ds`, `dy`),
-you need to contribute to `pgspecial <https://github.com/dbcli/pgspecial/>`_
-project.
-
-Visual Studio Code Debugging
------------------------------
-To set up Visual Studio Code to debug pgcli requires a launch.json file.
-
-Within the project, create a file: .vscode\\launch.json like below.
-
-::
-
-    {
-        // Use IntelliSense to learn about possible attributes.
-        // Hover to view descriptions of existing attributes.
-        // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
-        "version": "0.2.0",
-        "configurations": [
-            {
-                "name": "Python: Module",
-                "type": "python",
-                "request": "launch",
-                "module": "pgcli.main",
-                "justMyCode": false,
-                "console": "externalTerminal",
-                "env": {
-                    "PGUSER": "postgres",
-                    "PGPASS": "password",
-                    "PGHOST": "localhost",
-                    "PGPORT": "5432"
-                }
-            }
-        ]
-    }
-
-Building RPM and DEB packages
------------------------------
-
-You will need Vagrant 1.7.2 or higher. In the project root there is a
-Vagrantfile that is setup to do multi-vm provisioning. If you're setting things
-up for the first time, then do:
-
-::
-
-    $ version=x.y.z vagrant up debian
-    $ version=x.y.z vagrant up centos
-
-If you already have those VMs setup and you're merely creating a new version of
-DEB or RPM package, then you can do:
-
-::
-
-    $ version=x.y.z vagrant provision
-
-That will create a .deb file and a .rpm file.
-
-The deb package can be installed as follows:
-
-::
-
-    $ sudo dpkg -i pgcli*.deb   # if dependencies are available.
-
-    or
-
-    $ sudo apt-get install -f pgcli*.deb  # if dependencies are not available.
-
-
-The rpm package can be installed as follows:
-
-::
-
-    $ sudo yum install pgcli*.rpm
-
-Running the integration tests
------------------------------
-
-Integration tests use `behave package <https://behave.readthedocs.io/>`_ and
-pytest.
-Configuration settings for this package are provided via a ``behave.ini`` file
-in the ``tests`` directory.  An example::
-
-    [behave]
-    stderr_capture = false
-
-    [behave.userdata]
-    pg_test_user = dbuser
-    pg_test_host = db.example.com
-    pg_test_port = 30000
-
-First, install the requirements for testing:
-
-::
-    $ pip install -U pip setuptools 
-    $ pip install --no-cache-dir ".[sshtunnel]" 
-    $ pip install -r requirements-dev.txt 
-
-Ensure that the database user has permissions to create and drop test databases
-by checking your ``pg_hba.conf`` file. The default user should be ``postgres``
-at ``localhost``. Make sure the authentication method is set to ``trust``. If
-you made any changes to your ``pg_hba.conf`` make sure to restart the postgres
-service for the changes to take effect.
-
-::
-
-    # ONLY IF YOU MADE CHANGES TO YOUR pg_hba.conf FILE
-    $ sudo service postgresql restart
-
-After that, tests in the ``/pgcli/tests`` directory can be run with:
-(Note that these ``behave`` tests do not currently work when developing on Windows due to pexpect incompatibility.)
-
-::
-
-    # on directory /pgcli/tests
-    $ behave
-
-And on the ``/pgcli`` directory:
-
-::
-
-    # on directory /pgcli
-    $ py.test
-
-To see stdout/stderr, use the following command:
-
-::
-
-    $ behave --no-capture
-
-Troubleshooting the integration tests
--------------------------------------
-
-- Make sure postgres instance on localhost is running
-- Check your ``pg_hba.conf`` file to verify local connections are enabled
-- Check `this issue <https://github.com/dbcli/pgcli/issues/945>`_ for relevant information.
-- `File an issue <https://github.com/dbcli/pgcli/issues/new>`_.
-
-Coding Style
-------------
-
-``pgcli`` uses `black <https://github.com/ambv/black>`_ to format the source code. Make sure to install black.
-
-Releases
---------
-
-If you're the person responsible for releasing `pgcli`, `this guide <https://github.com/dbcli/pgcli/blob/main/RELEASES.md>`_ is for you.
diff -pruN 4.3.0-3/README.rst 4.4.0-1/README.rst
--- 4.3.0-3/README.rst	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/README.rst	2025-12-24 23:39:20.000000000 +0000
@@ -155,9 +155,10 @@ If you're interested in contributing to
 to extend my heartfelt gratitude. I've written a small doc to describe how to
 get this running in a development setup.
 
-https://github.com/dbcli/pgcli/blob/master/DEVELOP.rst
+https://github.com/dbcli/pgcli/blob/main/CONTRIBUTING.rst
 
 Please feel free to reach out to us if you need help.
+
 * Amjith, pgcli author: amjith.r@gmail.com, Twitter: `@amjithr <http://twitter.com/amjithr>`_
 * Irina, pgcli maintainer: i.chernyavska@gmail.com, Twitter: `@irinatruong <http://twitter.com/irinatruong>`_
 
@@ -209,43 +210,27 @@ If pip is not installed check if easy_in
 Linux:
 ======
 
-In depth getting started guide for ``pip`` - https://pip.pypa.io/en/latest/installation/
-
-Check if pip is already available in your system.
+Many distributions have ``pgcli`` packages.
+Refer to https://repology.org/project/pgcli/versions or your distribution to check the available versions.
 
-::
+Alternatively, you can use tools such as `pipx`_ or `uvx`_ to install the latest published package to an isolated virtual environment.
 
-    $ which pip
+.. _pipx: https://pipx.pypa.io/
+.. _uvx: https://docs.astral.sh/uv/guides/tools/
 
-If it doesn't exist, use your linux package manager to install `pip`. This
-might look something like:
+Run:
 
 ::
 
-    $ sudo apt-get install python-pip   # Debian, Ubuntu, Mint etc
-
-    or
-
-    $ sudo yum install python-pip  # RHEL, Centos, Fedora etc
-
-``pgcli`` requires python-dev, libpq-dev and libevent-dev packages. You can
-install these via your operating system package manager.
+    $ pipx install pgcli
 
+to install ``pgcli`` with ``pipx``, or run:
 
 ::
 
-    $ sudo apt-get install python-dev libpq-dev libevent-dev
-
-    or
-
-    $ sudo yum install python-devel postgresql-devel
-
-Then you can install pgcli:
-
-::
-
-    $ sudo pip install pgcli
+    $ uvx pgcli
 
+to run ``pgcli`` by installing on the fly with ``uvx``.
 
 Docker
 ======
@@ -378,12 +363,12 @@ Thanks to all the beta testers and contr
 .. |Build Status| image:: https://github.com/dbcli/pgcli/actions/workflows/ci.yml/badge.svg?branch=main
     :target: https://github.com/dbcli/pgcli/actions/workflows/ci.yml
 
-.. |CodeCov| image:: https://codecov.io/gh/dbcli/pgcli/branch/master/graph/badge.svg
+.. |CodeCov| image:: https://codecov.io/gh/dbcli/pgcli/branch/main/graph/badge.svg
    :target: https://codecov.io/gh/dbcli/pgcli
    :alt: Code coverage report
 
-.. |Landscape| image:: https://landscape.io/github/dbcli/pgcli/master/landscape.svg?style=flat
-   :target: https://landscape.io/github/dbcli/pgcli/master
+.. |Landscape| image:: https://landscape.io/github/dbcli/pgcli/main/landscape.svg?style=flat
+   :target: https://landscape.io/github/dbcli/pgcli/main
    :alt: Code Health
 
 .. |PyPI| image:: https://img.shields.io/pypi/v/pgcli.svg
diff -pruN 4.3.0-3/RELEASES.md 4.4.0-1/RELEASES.md
--- 4.3.0-3/RELEASES.md	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/RELEASES.md	2025-12-24 23:39:20.000000000 +0000
@@ -1,24 +1,6 @@
 Releasing pgcli
 ---------------
 
-You have been made the maintainer of `pgcli`? Congratulations! We have a release script to help you:
+You have been made the maintainer of `pgcli`? Congratulations!
 
-```sh
-> python release.py --help
-Usage: release.py [options]
-
-Options:
-  -h, --help           show this help message and exit
-  -c, --confirm-steps  Confirm every step. If the step is not confirmed, it
-                       will be skipped.
-  -d, --dry-run        Print out, but not actually run any steps.
-```
-
-The script can be run with `-c` to confirm or skip steps. There's also a `--dry-run` option that only prints out the steps.
-
-To release a new version of the package:
-
-* Create and merge a PR to bump the version in the changelog ([example PR](https://github.com/dbcli/pgcli/pull/1325)).
-* Pull `main` and bump the version number inside `pgcli/__init__.py`. Do not check in - the release script will do that.
-* Make sure you have the dev requirements installed: `pip install -r requirements-dev.txt -U --upgrade-strategy only-if-needed`.
-* Finally, run the release script: `python release.py`.
+To release a new version of the package, [create a new release](https://github.com/dbcli/pgcli/releases) in Github. This will trigger a Github action which will run all the tests, build the wheel and upload it to PyPI.
\ No newline at end of file
diff -pruN 4.3.0-3/changelog.rst 4.4.0-1/changelog.rst
--- 4.3.0-3/changelog.rst	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/changelog.rst	2025-12-24 23:39:20.000000000 +0000
@@ -1,3 +1,29 @@
+4.4.0 (2025-12-24)
+==================
+
+Features:
+---------
+* Add support for `init-command` to run when the connection is established.
+    * Command line option `--init-command`
+    * Provide `init-command` in the config file
+    * Support dsn specific init-command in the config file
+* Add suggestion when setting the search_path
+* Allow per dsn_alias ssh tunnel selection
+
+Internal:
+---------
+
+* Modernize the repository
+  * Use uv instead of pip
+  * Use github trusted publisher for pypi release
+  * Update dev requirements and replace requirements-dev.txt with pyproject.toml
+  * Use ruff instead of black
+
+Bug fixes:
+----------
+
+* Improve display of larger durations when passed as floats
+
 4.3.0 (2025-03-22)
 ==================
 
diff -pruN 4.3.0-3/debian/changelog 4.4.0-1/debian/changelog
--- 4.3.0-3/debian/changelog	2025-11-30 17:36:05.000000000 +0000
+++ 4.4.0-1/debian/changelog	2025-12-29 10:09:49.000000000 +0000
@@ -1,3 +1,10 @@
+pgcli (4.4.0-1) sid; urgency=medium
+
+  * Updating to standards version 4.7.3.
+  * Merging upstream version 4.4.0.
+
+ -- Daniel Baumann <daniel@debian.org>  Mon, 29 Dec 2025 11:09:49 +0100
+
 pgcli (4.3.0-3) sid; urgency=medium
 
   * Harmonizing upstream urls.
diff -pruN 4.3.0-3/debian/control 4.4.0-1/debian/control
--- 4.3.0-3/debian/control	2025-11-30 17:36:05.000000000 +0000
+++ 4.4.0-1/debian/control	2025-12-29 10:09:44.000000000 +0000
@@ -12,7 +12,7 @@ Build-Depends:
  python3-psycopg (>= 3.1.7-4~),
  python3-setuptools,
  python3-sqlparse (>= 0.3),
-Standards-Version: 4.7.2
+Standards-Version: 4.7.3
 Homepage: https://github.com/dbcli/pgcli
 Vcs-Browser: https://forgejo.debian.net/dbcli/pgcli
 Vcs-Git: https://forgejo.debian.net/dbcli/pgcli
diff -pruN 4.3.0-3/pgcli/__init__.py 4.4.0-1/pgcli/__init__.py
--- 4.3.0-3/pgcli/__init__.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/__init__.py	2025-12-24 23:39:20.000000000 +0000
@@ -1 +1 @@
-__version__ = "4.3.0"
+__version__ = "4.4.0"
diff -pruN 4.3.0-3/pgcli/auth.py 4.4.0-1/pgcli/auth.py
--- 4.3.0-3/pgcli/auth.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/auth.py	2025-12-24 23:39:20.000000000 +0000
@@ -26,9 +26,7 @@ def keyring_initialize(keyring_enabled,
 
         try:
             keyring = importlib.import_module("keyring")
-        except (
-            ModuleNotFoundError
-        ) as e:  # ImportError for Python 2, ModuleNotFoundError for Python 3
+        except ModuleNotFoundError as e:  # ImportError for Python 2, ModuleNotFoundError for Python 3
             logger.warning("import keyring failed: %r.", e)
 
 
@@ -40,9 +38,7 @@ def keyring_get_password(key):
         passwd = keyring.get_password("pgcli", key) or ""
     except Exception as e:
         click.secho(
-            keyring_error_message.format(
-                "Load your password from keyring returned:", str(e)
-            ),
+            keyring_error_message.format("Load your password from keyring returned:", str(e)),
             err=True,
             fg="red",
         )
diff -pruN 4.3.0-3/pgcli/completion_refresher.py 4.4.0-1/pgcli/completion_refresher.py
--- 4.3.0-3/pgcli/completion_refresher.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/completion_refresher.py	2025-12-24 23:39:20.000000000 +0000
@@ -40,18 +40,14 @@ class CompletionRefresher:
             )
             self._completer_thread.daemon = True
             self._completer_thread.start()
-            return [
-                (None, None, None, "Auto-completion refresh started in the background.")
-            ]
+            return [(None, None, None, "Auto-completion refresh started in the background.")]
 
     def is_refreshing(self):
         return self._completer_thread and self._completer_thread.is_alive()
 
     def _bg_refresh(self, pgexecute, special, callbacks, history=None, settings=None):
         settings = settings or {}
-        completer = PGCompleter(
-            smart_completion=True, pgspecial=special, settings=settings
-        )
+        completer = PGCompleter(smart_completion=True, pgspecial=special, settings=settings)
 
         if settings.get("single_connection"):
             executor = pgexecute
diff -pruN 4.3.0-3/pgcli/config.py 4.4.0-1/pgcli/config.py
--- 4.3.0-3/pgcli/config.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/config.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,4 +1,3 @@
-import errno
 import shutil
 import os
 import platform
diff -pruN 4.3.0-3/pgcli/key_bindings.py 4.4.0-1/pgcli/key_bindings.py
--- 4.3.0-3/pgcli/key_bindings.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/key_bindings.py	2025-12-24 23:39:20.000000000 +0000
@@ -107,8 +107,7 @@ def pgcli_bindings(pgcli):
     # history search, and one of several conditions are True
     @kb.add(
         "enter",
-        filter=~(completion_is_selected | is_searching)
-        & buffer_should_be_handled(pgcli),
+        filter=~(completion_is_selected | is_searching) & buffer_should_be_handled(pgcli),
     )
     def _(event):
         _logger.debug("Detected enter key.")
diff -pruN 4.3.0-3/pgcli/main.py 4.4.0-1/pgcli/main.py
--- 4.3.0-3/pgcli/main.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/main.py	2025-12-24 23:39:20.000000000 +0000
@@ -139,9 +139,7 @@ class PgCliQuitError(Exception):
 
 def notify_callback(notify: Notify):
     click.secho(
-        'Notification received on channel "{}" (PID {}):\n{}'.format(
-            notify.channel, notify.pid, notify.payload
-        ),
+        'Notification received on channel "{}" (PID {}):\n{}'.format(notify.channel, notify.pid, notify.payload),
         fg="green",
     )
 
@@ -155,9 +153,7 @@ class PGCli:
         os_environ_pager = os.environ.get("PAGER")
 
         if configured_pager:
-            self.logger.info(
-                'Default pager found in config file: "%s"', configured_pager
-            )
+            self.logger.info('Default pager found in config file: "%s"', configured_pager)
             os.environ["PAGER"] = configured_pager
         elif os_environ_pager:
             self.logger.info(
@@ -166,9 +162,7 @@ class PGCli:
             )
             os.environ["PAGER"] = os_environ_pager
         else:
-            self.logger.info(
-                "No default pager found in environment. Using os default pager"
-            )
+            self.logger.info("No default pager found in environment. Using os default pager")
 
         # Set default set of less recommended options, if they are not already set.
         # They are ignored if pager is different than less.
@@ -219,9 +213,7 @@ class PGCli:
         self.multiline_mode = c["main"].get("multi_line_mode", "psql")
         self.vi_mode = c["main"].as_bool("vi")
         self.auto_expand = auto_vertical_output or c["main"].as_bool("auto_expand")
-        self.auto_retry_closed_connection = c["main"].as_bool(
-            "auto_retry_closed_connection"
-        )
+        self.auto_retry_closed_connection = c["main"].as_bool("auto_retry_closed_connection")
         self.expanded_output = c["main"].as_bool("expand")
         self.pgspecial.timing_enabled = c["main"].as_bool("timing")
         if row_limit is not None:
@@ -247,26 +239,14 @@ class PGCli:
         self.syntax_style = c["main"]["syntax_style"]
         self.cli_style = c["colors"]
         self.wider_completion_menu = c["main"].as_bool("wider_completion_menu")
-        self.destructive_warning = parse_destructive_warning(
-            warn or c["main"].as_list("destructive_warning")
-        )
-        self.destructive_warning_restarts_connection = c["main"].as_bool(
-            "destructive_warning_restarts_connection"
-        )
-        self.destructive_statements_require_transaction = c["main"].as_bool(
-            "destructive_statements_require_transaction"
-        )
+        self.destructive_warning = parse_destructive_warning(warn or c["main"].as_list("destructive_warning"))
+        self.destructive_warning_restarts_connection = c["main"].as_bool("destructive_warning_restarts_connection")
+        self.destructive_statements_require_transaction = c["main"].as_bool("destructive_statements_require_transaction")
 
         self.less_chatty = bool(less_chatty) or c["main"].as_bool("less_chatty")
-        self.verbose_errors = "verbose_errors" in c["main"] and c["main"].as_bool(
-            "verbose_errors"
-        )
+        self.verbose_errors = "verbose_errors" in c["main"] and c["main"].as_bool("verbose_errors")
         self.null_string = c["main"].get("null_string", "<null>")
-        self.prompt_format = (
-            prompt
-            if prompt is not None
-            else c["main"].get("prompt", self.default_prompt)
-        )
+        self.prompt_format = prompt if prompt is not None else c["main"].get("prompt", self.default_prompt)
         self.prompt_dsn_format = prompt_dsn
         self.on_error = c["main"]["on_error"].upper()
         self.decimal_format = c["data_formats"]["decimal"]
@@ -275,9 +255,7 @@ class PGCli:
         auth.keyring_initialize(c["main"].as_bool("keyring"), logger=self.logger)
         self.show_bottom_toolbar = c["main"].as_bool("show_bottom_toolbar")
 
-        self.pgspecial.pset_pager(
-            self.config["main"].as_bool("enable_pager") and "on" or "off"
-        )
+        self.pgspecial.pset_pager(self.config["main"].as_bool("enable_pager") and "on" or "off")
 
         self.style_output = style_factory_output(self.syntax_style, c["colors"])
 
@@ -290,9 +268,7 @@ class PGCli:
         # Initialize completer
         smart_completion = c["main"].as_bool("smart_completion")
         keyword_casing = c["main"]["keyword_casing"]
-        single_connection = single_connection or c["main"].as_bool(
-            "always_use_single_connection"
-        )
+        single_connection = single_connection or c["main"].as_bool("always_use_single_connection")
         self.settings = {
             "casing_file": get_casing_file(c),
             "generate_casing_file": c["main"].as_bool("generate_casing_file"),
@@ -307,15 +283,14 @@ class PGCli:
             "alias_map_file": c["main"]["alias_map_file"] or None,
         }
 
-        completer = PGCompleter(
-            smart_completion, pgspecial=self.pgspecial, settings=self.settings
-        )
+        completer = PGCompleter(smart_completion, pgspecial=self.pgspecial, settings=self.settings)
         self.completer = completer
         self._completer_lock = threading.Lock()
         self.register_special_commands()
 
         self.prompt_app = None
 
+        self.dsn_ssh_tunnel_config = c.get("dsn ssh tunnels")
         self.ssh_tunnel_config = c.get("ssh tunnels")
         self.ssh_tunnel_url = ssh_tunnel_url
         self.ssh_tunnel = None
@@ -341,7 +316,8 @@ class PGCli:
             aliases=("use", "\\connect", "USE"),
         )
 
-        refresh_callback = lambda: self.refresh_completions(persist_priorities="all")
+        def refresh_callback():
+            return self.refresh_completions(persist_priorities="all")
 
         self.pgspecial.register(
             self.quit,
@@ -375,9 +351,7 @@ class PGCli:
             "Refresh auto-completions.",
             arg_type=NO_QUERY,
         )
-        self.pgspecial.register(
-            self.execute_from_file, "\\i", "\\i filename", "Execute commands from file."
-        )
+        self.pgspecial.register(self.execute_from_file, "\\i", "\\i filename", "Execute commands from file.")
         self.pgspecial.register(
             self.write_to_file,
             "\\o",
@@ -390,9 +364,7 @@ class PGCli:
             "\\log-file [filename]",
             "Log all query results to a logfile, in addition to the normal output destination.",
         )
-        self.pgspecial.register(
-            self.info_connection, "\\conninfo", "\\conninfo", "Get connection details"
-        )
+        self.pgspecial.register(self.info_connection, "\\conninfo", "\\conninfo", "Get connection details")
         self.pgspecial.register(
             self.change_table_format,
             "\\T",
@@ -461,8 +433,7 @@ class PGCli:
             None,
             None,
             'You are connected to database "%s" as user '
-            '"%s" on %s at port "%s".'
-            % (self.pgexecute.dbname, self.pgexecute.user, host, self.pgexecute.port),
+            '"%s" on %s at port "%s".' % (self.pgexecute.dbname, self.pgexecute.user, host, self.pgexecute.port),
         )
 
     def change_db(self, pattern, **_):
@@ -470,7 +441,7 @@ class PGCli:
             # Get all the parameters in pattern, handling double quotes if any.
             infos = re.findall(r'"[^"]*"|[^"\'\s]+', pattern)
             # Now removing quotes.
-            list(map(lambda s: s.strip('"'), infos))
+            [s.strip('"') for s in infos]
 
             infos.extend([None] * (4 - len(infos)))
             db, user, host, port = infos
@@ -492,8 +463,7 @@ class PGCli:
             None,
             None,
             None,
-            'You are now connected to database "%s" as '
-            'user "%s"' % (self.pgexecute.dbname, self.pgexecute.user),
+            'You are now connected to database "%s" as user "%s"' % (self.pgexecute.dbname, self.pgexecute.user),
         )
 
     def execute_from_file(self, pattern, **_):
@@ -514,9 +484,7 @@ class PGCli:
             ):
                 message = "Destructive statements must be run within a transaction. Command execution stopped."
                 return [(None, None, None, message)]
-            destroy = confirm_destructive_query(
-                query, self.destructive_warning, self.dsn_alias
-            )
+            destroy = confirm_destructive_query(query, self.destructive_warning, self.dsn_alias)
             if destroy is False:
                 message = "Wise choice. Command execution stopped."
                 return [(None, None, None, message)]
@@ -591,10 +559,7 @@ class PGCli:
 
         log_level = level_map[log_level.upper()]
 
-        formatter = logging.Formatter(
-            "%(asctime)s (%(process)d/%(threadName)s) "
-            "%(name)s %(levelname)s - %(message)s"
-        )
+        formatter = logging.Formatter("%(asctime)s (%(process)d/%(threadName)s) %(name)s %(levelname)s - %(message)s")
 
         handler.setFormatter(formatter)
 
@@ -615,9 +580,7 @@ class PGCli:
     def connect_service(self, service, user):
         service_config, file = parse_service_info(service)
         if service_config is None:
-            click.secho(
-                f"service '{service}' was not found in {file}", err=True, fg="red"
-            )
+            click.secho(f"service '{service}' was not found in {file}", err=True, fg="red")
             sys.exit(1)
         self.connect(
             database=service_config.get("dbname"),
@@ -633,9 +596,7 @@ class PGCli:
         kwargs = {remap.get(k, k): v for k, v in kwargs.items()}
         self.connect(**kwargs)
 
-    def connect(
-        self, database="", host="", user="", port="", passwd="", dsn="", **kwargs
-    ):
+    def connect(self, database="", host="", user="", port="", passwd="", dsn="", **kwargs):
         # Connect to the database.
 
         if not user:
@@ -657,9 +618,7 @@ class PGCli:
         # If we successfully parsed a password from a URI, there's no need to
         # prompt for it, even with the -W flag
         if self.force_passwd_prompt and not passwd:
-            passwd = click.prompt(
-                "Password for %s" % user, hide_input=True, show_default=False, type=str
-            )
+            passwd = click.prompt("Password for %s" % user, hide_input=True, show_default=False, type=str)
 
         key = f"{user}@{host}"
 
@@ -685,6 +644,12 @@ class PGCli:
             if "port" in parsed_dsn:
                 port = parsed_dsn["port"]
 
+        if self.dsn_alias and self.dsn_ssh_tunnel_config and not self.ssh_tunnel_url:
+            for dsn_regex, tunnel_url in self.dsn_ssh_tunnel_config.items():
+                if re.search(dsn_regex, self.dsn_alias):
+                    self.ssh_tunnel_url = tunnel_url
+                    break
+
         if self.ssh_tunnel_config and not self.ssh_tunnel_url:
             for db_host_regex, tunnel_url in self.ssh_tunnel_config.items():
                 if re.search(db_host_regex, host):
@@ -825,13 +790,9 @@ class PGCli:
                     and not self.pgexecute.valid_transaction()
                     and is_destructive(text, self.destructive_warning)
                 ):
-                    click.secho(
-                        "Destructive statements must be run within a transaction."
-                    )
+                    click.secho("Destructive statements must be run within a transaction.")
                     raise KeyboardInterrupt
-                destroy = confirm_destructive_query(
-                    text, self.destructive_warning, self.dsn_alias
-                )
+                destroy = confirm_destructive_query(text, self.destructive_warning, self.dsn_alias)
                 if destroy is False:
                     click.secho("Wise choice!")
                     raise KeyboardInterrupt
@@ -844,9 +805,7 @@ class PGCli:
                 # Restart connection to the database
                 self.pgexecute.connect()
                 logger.debug("cancelled query and restarted connection, sql: %r", text)
-                click.secho(
-                    "cancelled query and restarted connection", err=True, fg="red"
-                )
+                click.secho("cancelled query and restarted connection", err=True, fg="red")
             else:
                 logger.debug("cancelled query, sql: %r", text)
                 click.secho("cancelled query", err=True, fg="red")
@@ -866,9 +825,7 @@ class PGCli:
             click.secho(str(e), err=True, fg="red")
         else:
             try:
-                if self.output_file and not text.startswith(
-                    ("\\o ", "\\log-file", "\\? ", "\\echo ")
-                ):
+                if self.output_file and not text.startswith(("\\o ", "\\log-file", "\\? ", "\\echo ")):
                     try:
                         with open(self.output_file, "a", encoding="utf-8") as f:
                             click.echo(text, file=f)
@@ -881,16 +838,10 @@ class PGCli:
                         self.echo_via_pager("\n".join(output))
 
                 # Log to file in addition to normal output
-                if (
-                    self.log_file
-                    and not text.startswith(("\\o ", "\\log-file", "\\? ", "\\echo "))
-                    and not text.strip() == ""
-                ):
+                if self.log_file and not text.startswith(("\\o ", "\\log-file", "\\? ", "\\echo ")) and not text.strip() == "":
                     try:
                         with open(self.log_file, "a", encoding="utf-8") as f:
-                            click.echo(
-                                dt.datetime.now().isoformat(), file=f
-                            )  # timestamp log
+                            click.echo(dt.datetime.now().isoformat(), file=f)  # timestamp log
                             click.echo(text, file=f)
                             click.echo("\n".join(output), file=f)
                             click.echo("", file=f)  # extra newline
@@ -1018,9 +969,7 @@ class PGCli:
             try:
                 self.watch_command = self.query_history[-1].query
             except IndexError:
-                click.secho(
-                    "\\watch cannot be used with an empty query", err=True, fg="red"
-                )
+                click.secho("\\watch cannot be used with an empty query", err=True, fg="red")
                 self.watch_command = None
 
         # If there's a command to \watch, run it in a loop.
@@ -1050,10 +999,7 @@ class PGCli:
 
             prompt = self.get_prompt(prompt_format)
 
-            if (
-                prompt_format == self.default_prompt
-                and len(prompt) > self.max_len_prompt
-            ):
+            if prompt_format == self.default_prompt and len(prompt) > self.max_len_prompt:
                 prompt = self.get_prompt("\\d> ")
 
             prompt = prompt.replace("\\x1b", "\x1b")
@@ -1116,12 +1062,7 @@ class PGCli:
         if not is_select(sql):
             return False
 
-        return (
-            not self._has_limit(sql)
-            and self.row_limit != 0
-            and cur
-            and cur.rowcount > self.row_limit
-        )
+        return not self._has_limit(sql) and self.row_limit != 0 and cur and cur.rowcount > self.row_limit
 
     def _has_limit(self, sql):
         if not sql:
@@ -1191,18 +1132,12 @@ class PGCli:
                 missingval=self.null_string,
                 expanded=expanded,
                 max_width=max_width,
-                case_function=(
-                    self.completer.case
-                    if self.settings["case_column_headers"]
-                    else lambda x: x
-                ),
+                case_function=(self.completer.case if self.settings["case_column_headers"] else lambda x: x),
                 style_output=self.style_output,
                 max_field_width=self.max_field_width,
             )
             execution = time() - start
-            formatted = format_output(
-                title, cur, headers, status, settings, self.explain_mode
-            )
+            formatted = format_output(title, cur, headers, status, settings, self.explain_mode)
 
             output.extend(formatted)
             total = time() - start
@@ -1241,9 +1176,7 @@ class PGCli:
             click.secho("Reconnect Failed", fg="red")
             click.secho(str(e), err=True, fg="red")
         else:
-            retry = self.auto_retry_closed_connection or confirm(
-                "Run the query from before reconnecting?"
-            )
+            retry = self.auto_retry_closed_connection or confirm("Run the query from before reconnecting?")
             if retry:
                 click.secho("Running query...", fg="green")
                 # Don't get stuck in a retry loop
@@ -1258,9 +1191,7 @@ class PGCli:
         :param persist_priorities: 'all' or 'keywords'
         """
 
-        callback = functools.partial(
-            self._on_completions_refreshed, persist_priorities=persist_priorities
-        )
+        callback = functools.partial(self._on_completions_refreshed, persist_priorities=persist_priorities)
         return self.completion_refresher.refresh(
             self.pgexecute,
             self.pgspecial,
@@ -1311,9 +1242,7 @@ class PGCli:
 
     def get_completions(self, text, cursor_positition):
         with self._completer_lock:
-            return self.completer.get_completions(
-                Document(text=text, cursor_position=cursor_positition), None
-            )
+            return self.completer.get_completions(Document(text=text, cursor_position=cursor_positition), None)
 
     def get_prompt(self, string):
         # should be before replacing \\d
@@ -1340,10 +1269,7 @@ class PGCli:
         """Will this line be too wide to fit into terminal?"""
         if not self.prompt_app:
             return False
-        return (
-            len(COLOR_CODE_REGEX.sub("", line))
-            > self.prompt_app.output.get_size().columns
-        )
+        return len(COLOR_CODE_REGEX.sub("", line)) > self.prompt_app.output.get_size().columns
 
     def is_too_tall(self, lines):
         """Are there too many lines to fit into terminal?"""
@@ -1354,10 +1280,7 @@ class PGCli:
     def echo_via_pager(self, text, color=None):
         if self.pgspecial.pager_config == PAGER_OFF or self.watch_command:
             click.echo(text, color=color)
-        elif (
-            self.pgspecial.pager_config == PAGER_LONG_OUTPUT
-            and self.table_format != "csv"
-        ):
+        elif self.pgspecial.pager_config == PAGER_LONG_OUTPUT and self.table_format != "csv":
             lines = text.split("\n")
 
             # The last 4 lines are reserved for the pgcli menu and padding
@@ -1382,7 +1305,7 @@ class PGCli:
     "-p",
     "--port",
     default=5432,
-    help="Port number at which the " "postgres instance is listening.",
+    help="Port number at which the postgres instance is listening.",
     envvar="PGPORT",
     type=click.INT,
 )
@@ -1392,9 +1315,7 @@ class PGCli:
     "username_opt",
     help="Username to connect to the postgres database.",
 )
-@click.option(
-    "-u", "--user", "username_opt", help="Username to connect to the postgres database."
-)
+@click.option("-u", "--user", "username_opt", help="Username to connect to the postgres database.")
 @click.option(
     "-W",
     "--password",
@@ -1499,6 +1420,12 @@ class PGCli:
     default=None,
     help="Write all queries & output into a file, in addition to the normal output destination.",
 )
+@click.option(
+    "--init-command",
+    "init_command",
+    type=str,
+    help="SQL statement to execute after connecting.",
+)
 @click.argument("dbname", default=lambda: None, envvar="PGDATABASE", nargs=1)
 @click.argument("username", default=lambda: None, envvar="PGUSER", nargs=1)
 def cli(
@@ -1525,6 +1452,7 @@ def cli(
     list_dsn,
     warn,
     ssh_tunnel: str,
+    init_command: str,
     log_file: str,
 ):
     if version:
@@ -1553,10 +1481,9 @@ def cli(
             for alias in cfg["alias_dsn"]:
                 click.secho(alias + " : " + cfg["alias_dsn"][alias])
             sys.exit(0)
-        except Exception as err:
+        except Exception:
             click.secho(
-                "Invalid DSNs found in the config file. "
-                'Please check the "[alias_dsn]" section in pgclirc.',
+                "Invalid DSNs found in the config file. Please check the \"[alias_dsn]\" section in pgclirc.",
                 err=True,
                 fg="red",
             )
@@ -1608,22 +1535,20 @@ def cli(
             dsn_config = cfg["alias_dsn"][dsn]
         except KeyError:
             click.secho(
-                f"Could not find a DSN with alias {dsn}. "
-                'Please check the "[alias_dsn]" section in pgclirc.',
+                f"Could not find a DSN with alias {dsn}. Please check the \"[alias_dsn]\" section in pgclirc.",
                 err=True,
                 fg="red",
             )
             sys.exit(1)
         except Exception:
             click.secho(
-                "Invalid DSNs found in the config file. "
-                'Please check the "[alias_dsn]" section in pgclirc.',
+                "Invalid DSNs found in the config file. Please check the \"[alias_dsn]\" section in pgclirc.",
                 err=True,
                 fg="red",
             )
             sys.exit(1)
-        pgcli.connect_uri(dsn_config)
         pgcli.dsn_alias = dsn
+        pgcli.connect_uri(dsn_config)
     elif "://" in database:
         pgcli.connect_uri(database)
     elif "=" in database and service is None:
@@ -1633,9 +1558,7 @@ def cli(
     else:
         pgcli.connect(database, host, user, port)
 
-    if "use_local_timezone" not in cfg["main"] or cfg["main"].as_bool(
-        "use_local_timezone"
-    ):
+    if "use_local_timezone" not in cfg["main"] or cfg["main"].as_bool("use_local_timezone"):
         server_tz = pgcli.pgexecute.get_timezone()
 
         def echo_error(msg: str):
@@ -1682,6 +1605,33 @@ def cli(
             # of conflicting sources
             echo_error(e.args[0])
 
+    # Merge init-commands: global, DSN-specific, then CLI-provided
+    init_cmds = []
+    # 1) Global init-commands
+    global_section = pgcli.config.get("init-commands", {})
+    for _, val in global_section.items():
+        if isinstance(val, (list, tuple)):
+            init_cmds.extend(val)
+        elif val:
+            init_cmds.append(val)
+    # 2) DSN-specific init-commands
+    if dsn:
+        alias_section = pgcli.config.get("alias_dsn.init-commands", {})
+        if dsn in alias_section:
+            val = alias_section.get(dsn)
+            if isinstance(val, (list, tuple)):
+                init_cmds.extend(val)
+            elif val:
+                init_cmds.append(val)
+    # 3) CLI-provided init-command
+    if init_command:
+        init_cmds.append(init_command)
+    if init_cmds:
+        click.echo("Running init commands: %s" % "; ".join(init_cmds))
+        for cmd in init_cmds:
+            # Execute each init command
+            list(pgcli.pgexecute.run(cmd))
+
     if list_databases:
         cur, headers, status = pgcli.pgexecute.full_databases()
 
@@ -1707,7 +1657,7 @@ def cli(
             sys.exit(0)
 
     pgcli.logger.debug(
-        "Launch Params: \n" "\tdatabase: %r" "\tuser: %r" "\thost: %r" "\tport: %r",
+        "Launch Params: \n\tdatabase: %r\tuser: %r\thost: %r\tport: %r",
         database,
         user,
         host,
@@ -1725,9 +1675,7 @@ def obfuscate_process_password():
     if "://" in process_title:
         process_title = re.sub(r":(.*):(.*)@", r":\1:xxxx@", process_title)
     elif "=" in process_title:
-        process_title = re.sub(
-            r"password=(.+?)((\s[a-zA-Z]+=)|$)", r"password=xxxx\2", process_title
-        )
+        process_title = re.sub(r"password=(.+?)((\s[a-zA-Z]+=)|$)", r"password=xxxx\2", process_title)
 
     setproctitle.setproctitle(process_title)
 
@@ -1867,9 +1815,7 @@ def format_output(title, cur, headers, s
     def format_arrays(data, headers, **_):
         data = list(data)
         for row in data:
-            row[:] = [
-                format_array(val) if isinstance(val, list) else val for val in row
-            ]
+            row[:] = [format_array(val) if isinstance(val, list) else val for val in row]
 
         return data, headers
 
@@ -1934,13 +1880,7 @@ def format_output(title, cur, headers, s
             formatted = iter(formatted.splitlines())
         first_line = next(formatted)
         formatted = itertools.chain([first_line], formatted)
-        if (
-            not explain_mode
-            and not expanded
-            and max_width
-            and len(strip_ansi(first_line)) > max_width
-            and headers
-        ):
+        if not explain_mode and not expanded and max_width and len(strip_ansi(first_line)) > max_width and headers:
             formatted = formatter.format_output(
                 cur,
                 headers,
@@ -1993,12 +1933,12 @@ def duration_in_words(duration_in_second
     components = []
     hours, remainder = divmod(duration_in_seconds, 3600)
     if hours > 1:
-        components.append(f"{hours} hours")
+        components.append(f"{int(hours)} hours")
     elif hours == 1:
         components.append("1 hour")
     minutes, seconds = divmod(remainder, 60)
     if minutes > 1:
-        components.append(f"{minutes} minutes")
+        components.append(f"{int(minutes)} minutes")
     elif minutes == 1:
         components.append("1 minute")
     if seconds >= 2:
diff -pruN 4.3.0-3/pgcli/packages/formatter/sqlformatter.py 4.4.0-1/pgcli/packages/formatter/sqlformatter.py
--- 4.3.0-3/pgcli/packages/formatter/sqlformatter.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/formatter/sqlformatter.py	2025-12-24 23:39:20.000000000 +0000
@@ -52,16 +52,11 @@ def adapter(data, headers, table_format=
             yield 'UPDATE "{}" SET'.format(table_name)
             prefix = "  "
             for i, v in enumerate(d[keys:], keys):
-                yield '{}"{}" = {}'.format(
-                    prefix, headers[i], escape_for_sql_statement(v)
-                )
+                yield '{}"{}" = {}'.format(prefix, headers[i], escape_for_sql_statement(v))
                 if prefix == "  ":
                     prefix = ", "
             f = '"{}" = {}'
-            where = (
-                f.format(headers[i], escape_for_sql_statement(d[i]))
-                for i in range(keys)
-            )
+            where = (f.format(headers[i], escape_for_sql_statement(d[i])) for i in range(keys))
             yield "WHERE {};".format(" AND ".join(where))
 
 
@@ -69,6 +64,4 @@ def register_new_formatter(TabularOutput
     global formatter
     formatter = TabularOutputFormatter
     for sql_format in supported_formats:
-        TabularOutputFormatter.register_new_formatter(
-            sql_format, adapter, preprocessors, {"table_format": sql_format}
-        )
+        TabularOutputFormatter.register_new_formatter(sql_format, adapter, preprocessors, {"table_format": sql_format})
diff -pruN 4.3.0-3/pgcli/packages/parseutils/__init__.py 4.4.0-1/pgcli/packages/parseutils/__init__.py
--- 4.3.0-3/pgcli/packages/parseutils/__init__.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/parseutils/__init__.py	2025-12-24 23:39:20.000000000 +0000
@@ -29,9 +29,7 @@ def is_destructive(queries, keywords):
     for query in sqlparse.split(queries):
         if query:
             formatted_sql = sqlparse.format(query.lower(), strip_comments=True).strip()
-            if "unconditional_update" in keywords and query_is_unconditional_update(
-                formatted_sql
-            ):
+            if "unconditional_update" in keywords and query_is_unconditional_update(formatted_sql):
                 return True
             if query_starts_with(formatted_sql, keywords):
                 return True
diff -pruN 4.3.0-3/pgcli/packages/parseutils/ctes.py 4.4.0-1/pgcli/packages/parseutils/ctes.py
--- 4.3.0-3/pgcli/packages/parseutils/ctes.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/parseutils/ctes.py	2025-12-24 23:39:20.000000000 +0000
@@ -17,7 +17,7 @@ def isolate_query_ctes(full_text, text_b
     """Simplify a query by converting CTEs into table metadata objects"""
 
     if not full_text or not full_text.strip():
-        return full_text, text_before_cursor, tuple()
+        return full_text, text_before_cursor, ()
 
     ctes, remainder = extract_ctes(full_text)
     if not ctes:
diff -pruN 4.3.0-3/pgcli/packages/parseutils/meta.py 4.4.0-1/pgcli/packages/parseutils/meta.py
--- 4.3.0-3/pgcli/packages/parseutils/meta.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/parseutils/meta.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,8 +1,6 @@
 from collections import namedtuple
 
-_ColumnMetadata = namedtuple(
-    "ColumnMetadata", ["name", "datatype", "foreignkeys", "default", "has_default"]
-)
+_ColumnMetadata = namedtuple("ColumnMetadata", ["name", "datatype", "foreignkeys", "default", "has_default"])
 
 
 def ColumnMetadata(name, datatype, foreignkeys=None, default=None, has_default=False):
@@ -143,11 +141,7 @@ class FunctionMetadata:
             num_args = len(args)
             num_defaults = len(self.arg_defaults)
             has_default = num + num_defaults >= num_args
-            default = (
-                self.arg_defaults[num - num_args + num_defaults]
-                if has_default
-                else None
-            )
+            default = self.arg_defaults[num - num_args + num_defaults] if has_default else None
             return ColumnMetadata(name, typ, [], default, has_default)
 
         return [arg(name, typ, num) for num, (name, typ) in enumerate(args)]
diff -pruN 4.3.0-3/pgcli/packages/parseutils/tables.py 4.4.0-1/pgcli/packages/parseutils/tables.py
--- 4.3.0-3/pgcli/packages/parseutils/tables.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/parseutils/tables.py	2025-12-24 23:39:20.000000000 +0000
@@ -3,16 +3,9 @@ from collections import namedtuple
 from sqlparse.sql import IdentifierList, Identifier, Function
 from sqlparse.tokens import Keyword, DML, Punctuation
 
-TableReference = namedtuple(
-    "TableReference", ["schema", "name", "alias", "is_function"]
-)
+TableReference = namedtuple("TableReference", ["schema", "name", "alias", "is_function"])
 TableReference.ref = property(
-    lambda self: self.alias
-    or (
-        self.name
-        if self.name.islower() or self.name[0] == '"'
-        else '"' + self.name + '"'
-    )
+    lambda self: self.alias or (self.name if self.name.islower() or self.name[0] == '"' else '"' + self.name + '"')
 )
 
 
@@ -53,11 +46,7 @@ def extract_from_part(parsed, stop_at_pu
             # Also 'SELECT * FROM abc JOIN def' will trigger this elif
             # condition. So we need to ignore the keyword JOIN and its variants
             # INNER JOIN, FULL OUTER JOIN, etc.
-            elif (
-                item.ttype is Keyword
-                and (not item.value.upper() == "FROM")
-                and (not item.value.upper().endswith("JOIN"))
-            ):
+            elif item.ttype is Keyword and (not item.value.upper() == "FROM") and (not item.value.upper().endswith("JOIN")):
                 tbl_prefix_seen = False
             else:
                 yield item
@@ -116,15 +105,11 @@ def extract_table_identifiers(token_stre
                     try:
                         schema_name = identifier.get_parent_name()
                         real_name = identifier.get_real_name()
-                        is_function = allow_functions and _identifier_is_function(
-                            identifier
-                        )
+                        is_function = allow_functions and _identifier_is_function(identifier)
                     except AttributeError:
                         continue
                     if real_name:
-                        yield TableReference(
-                            schema_name, real_name, identifier.get_alias(), is_function
-                        )
+                        yield TableReference(schema_name, real_name, identifier.get_alias(), is_function)
             elif isinstance(item, Identifier):
                 schema_name, real_name, alias = parse_identifier(item)
                 is_function = allow_functions and _identifier_is_function(item)
diff -pruN 4.3.0-3/pgcli/packages/parseutils/utils.py 4.4.0-1/pgcli/packages/parseutils/utils.py
--- 4.3.0-3/pgcli/packages/parseutils/utils.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/parseutils/utils.py	2025-12-24 23:39:20.000000000 +0000
@@ -79,9 +79,7 @@ def find_prev_keyword(sql, n_skip=0):
     logical_operators = ("AND", "OR", "NOT", "BETWEEN")
 
     for t in reversed(flattened):
-        if t.value == "(" or (
-            t.is_keyword and (t.value.upper() not in logical_operators)
-        ):
+        if t.value == "(" or (t.is_keyword and (t.value.upper() not in logical_operators)):
             # Find the location of token t in the original parsed statement
             # We can't use parsed.token_index(t) because t may be a child token
             # inside a TokenList, in which case token_index throws an error
diff -pruN 4.3.0-3/pgcli/packages/pgliterals/pgliterals.json 4.4.0-1/pgcli/packages/pgliterals/pgliterals.json
--- 4.3.0-3/pgcli/packages/pgliterals/pgliterals.json	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/pgliterals/pgliterals.json	2025-12-24 23:39:20.000000000 +0000
@@ -227,7 +227,7 @@
         "ROWS": [],
         "SELECT": [],
         "SESSION": [],
-        "SET": [],
+        "SET": ["SEARCH_PATH TO"],
         "SHARE": [],
         "SHOW": [],
         "SIZE": [],
diff -pruN 4.3.0-3/pgcli/packages/sqlcompletion.py 4.4.0-1/pgcli/packages/sqlcompletion.py
--- 4.3.0-3/pgcli/packages/sqlcompletion.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/packages/sqlcompletion.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,4 +1,3 @@
-import sys
 import re
 import sqlparse
 from collections import namedtuple
@@ -27,16 +26,16 @@ Join = namedtuple("Join", ["table_refs",
 
 Function = namedtuple("Function", ["schema", "table_refs", "usage"])
 # For convenience, don't require the `usage` argument in Function constructor
-Function.__new__.__defaults__ = (None, tuple(), None)
-Table.__new__.__defaults__ = (None, tuple(), tuple())
-View.__new__.__defaults__ = (None, tuple())
-FromClauseItem.__new__.__defaults__ = (None, tuple(), tuple())
+Function.__new__.__defaults__ = (None, (), None)
+Table.__new__.__defaults__ = (None, (), ())
+View.__new__.__defaults__ = (None, ())
+FromClauseItem.__new__.__defaults__ = (None, (), ())
 
 Column = namedtuple(
     "Column",
     ["table_refs", "require_last_table", "local_tables", "qualifiable", "context"],
 )
-Column.__new__.__defaults__ = (None, None, tuple(), False, None)
+Column.__new__.__defaults__ = (None, None, (), False, None)
 
 Keyword = namedtuple("Keyword", ["last_token"])
 Keyword.__new__.__defaults__ = (None,)
@@ -50,15 +49,11 @@ Path = namedtuple("Path", [])
 class SqlStatement:
     def __init__(self, full_text, text_before_cursor):
         self.identifier = None
-        self.word_before_cursor = word_before_cursor = last_word(
-            text_before_cursor, include="many_punctuations"
-        )
+        self.word_before_cursor = word_before_cursor = last_word(text_before_cursor, include="many_punctuations")
         full_text = _strip_named_query(full_text)
         text_before_cursor = _strip_named_query(text_before_cursor)
 
-        full_text, text_before_cursor, self.local_tables = isolate_query_ctes(
-            full_text, text_before_cursor
-        )
+        full_text, text_before_cursor, self.local_tables = isolate_query_ctes(full_text, text_before_cursor)
 
         self.text_before_cursor_including_last_word = text_before_cursor
 
@@ -78,9 +73,7 @@ class SqlStatement:
         else:
             parsed = sqlparse.parse(text_before_cursor)
 
-        full_text, text_before_cursor, parsed = _split_multiple_statements(
-            full_text, text_before_cursor, parsed
-        )
+        full_text, text_before_cursor, parsed = _split_multiple_statements(full_text, text_before_cursor, parsed)
 
         self.full_text = full_text
         self.text_before_cursor = text_before_cursor
@@ -98,9 +91,7 @@ class SqlStatement:
         If 'before', only tables before the cursor are returned.
         If not 'insert' and the stmt is an insert, the first table is skipped.
         """
-        tables = extract_tables(
-            self.full_text if scope == "full" else self.text_before_cursor
-        )
+        tables = extract_tables(self.full_text if scope == "full" else self.text_before_cursor)
         if scope == "insert":
             tables = tables[:1]
         elif self.is_insert():
@@ -119,9 +110,7 @@ class SqlStatement:
         return schema
 
     def reduce_to_prev_keyword(self, n_skip=0):
-        prev_keyword, self.text_before_cursor = find_prev_keyword(
-            self.text_before_cursor, n_skip=n_skip
-        )
+        prev_keyword, self.text_before_cursor = find_prev_keyword(self.text_before_cursor, n_skip=n_skip)
         return prev_keyword
 
 
@@ -222,9 +211,7 @@ def _split_multiple_statements(full_text
             token1_idx = statement.token_index(token1)
             token2 = statement.token_next(token1_idx)[1]
     if token2 and token2.value.upper() == "FUNCTION":
-        full_text, text_before_cursor, statement = _statement_from_function(
-            full_text, text_before_cursor, statement
-        )
+        full_text, text_before_cursor, statement = _statement_from_function(full_text, text_before_cursor, statement)
     return full_text, text_before_cursor, statement
 
 
@@ -361,11 +348,7 @@ def suggest_based_on_last_token(token, s
         # Get the token before the parens
         prev_tok = p.token_prev(len(p.tokens) - 1)[1]
 
-        if (
-            prev_tok
-            and prev_tok.value
-            and prev_tok.value.lower().split(" ")[-1] == "using"
-        ):
+        if prev_tok and prev_tok.value and prev_tok.value.lower().split(" ")[-1] == "using":
             # tbl1 INNER JOIN tbl2 USING (col1, col2)
             tables = stmt.get_tables("before")
 
@@ -389,15 +372,21 @@ def suggest_based_on_last_token(token, s
         # We're probably in a function argument list
         return _suggest_expression(token_v, stmt)
     elif token_v == "set":
+        # "set" for changing a run-time parameter
+        p = sqlparse.parse(stmt.text_before_cursor)[0]
+        is_first_token = p.token_first().value.upper() == token_v.upper()
+        if is_first_token:
+            return (Keyword(token_v.upper()),)
+
+        # E.g. 'UPDATE foo SET'
         return (Column(table_refs=stmt.get_tables(), local_tables=stmt.local_tables),)
+
     elif token_v in ("select", "where", "having", "order by", "distinct"):
         return _suggest_expression(token_v, stmt)
     elif token_v == "as":
         # Don't suggest anything for aliases
         return ()
-    elif (token_v.endswith("join") and token.is_keyword) or (
-        token_v in ("copy", "from", "update", "into", "describe", "truncate")
-    ):
+    elif (token_v.endswith("join") and token.is_keyword) or (token_v in ("copy", "from", "update", "into", "describe", "truncate")):
         schema = stmt.get_identifier_schema()
         tables = extract_tables(stmt.text_before_cursor)
         is_join = token_v.endswith("join") and token.is_keyword
@@ -411,11 +400,7 @@ def suggest_based_on_last_token(token, s
             suggest.insert(0, Schema())
 
         if token_v == "from" or is_join:
-            suggest.append(
-                FromClauseItem(
-                    schema=schema, table_refs=tables, local_tables=stmt.local_tables
-                )
-            )
+            suggest.append(FromClauseItem(schema=schema, table_refs=tables, local_tables=stmt.local_tables))
         elif token_v == "truncate":
             suggest.append(Table(schema))
         else:
@@ -447,7 +432,7 @@ def suggest_based_on_last_token(token, s
 
         except ValueError:
             pass
-        return tuple()
+        return ()
 
     elif token_v in ("table", "view"):
         # E.g. 'ALTER TABLE <tablname>'
@@ -517,6 +502,9 @@ def suggest_based_on_last_token(token, s
         return tuple(suggestions)
     elif token_v in {"alter", "create", "drop"}:
         return (Keyword(token_v.upper()),)
+    elif token_v == "to":
+        # E.g. 'SET search_path TO'
+        return (Schema(),)
     elif token.is_keyword:
         # token is a keyword we haven't implemented any special handling for
         # go backwards in the query until we find one we do recognize
@@ -553,14 +541,10 @@ def _suggest_expression(token_v, stmt):
     )
 
 
-def identifies(id, ref):
+def identifies(table_id, ref):
     """Returns true if string `id` matches TableReference `ref`"""
 
-    return (
-        id == ref.alias
-        or id == ref.name
-        or (ref.schema and (id == ref.schema + "." + ref.name))
-    )
+    return table_id == ref.alias or table_id == ref.name or (ref.schema and (table_id == ref.schema + "." + ref.name))
 
 
 def _allow_join_condition(statement):
diff -pruN 4.3.0-3/pgcli/pgbuffer.py 4.4.0-1/pgcli/pgbuffer.py
--- 4.3.0-3/pgcli/pgbuffer.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pgbuffer.py	2025-12-24 23:39:20.000000000 +0000
@@ -25,9 +25,7 @@ mode, which by default will insert new l
 def safe_multi_line_mode(pgcli):
     @Condition
     def cond():
-        _logger.debug(
-            'Multi-line mode state: "%s" / "%s"', pgcli.multi_line, pgcli.multiline_mode
-        )
+        _logger.debug('Multi-line mode state: "%s" / "%s"', pgcli.multi_line, pgcli.multiline_mode)
         return pgcli.multi_line and (pgcli.multiline_mode == "safe")
 
     return cond
@@ -48,14 +46,13 @@ def buffer_should_be_handled(pgcli):
         text = doc.text.strip()
 
         return (
-            text.startswith("\\")  # Special Command
-            or text.endswith(r"\e")  # Special Command
-            or text.endswith(r"\G")  # Ended with \e which should launch the editor
-            or _is_complete(text)  # A complete SQL command
-            or (text == "exit")  # Exit doesn't need semi-colon
-            or (text == "quit")  # Quit doesn't need semi-colon
-            or (text == ":q")  # To all the vim fans out there
-            or (text == "")  # Just a plain enter without any text
+            text.startswith("\\")
+            or text.endswith((r"\e", r"\G"))
+            or _is_complete(text)
+            or text == "exit"
+            or text == "quit"
+            or text == ":q"
+            or text == ""  # Just a plain enter without any text
         )
 
     return cond
diff -pruN 4.3.0-3/pgcli/pgclirc 4.4.0-1/pgcli/pgclirc
--- 4.3.0-3/pgcli/pgclirc	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pgclirc	2025-12-24 23:39:20.000000000 +0000
@@ -232,12 +232,21 @@ output.null = "#808080"
 
 # Named queries are queries you can execute by name.
 [named queries]
+# ver = "SELECT version()"
 
 # Here's where you can provide a list of connection string aliases.
 # You can use it by passing the -D option. `pgcli -D example_dsn`
 [alias_dsn]
 # example_dsn = postgresql://[user[:password]@][netloc][:port][/dbname]
 
+# Initial commands to execute when connecting to any database.
+[init-commands]
+# example = "SET search_path TO myschema"
+
+# Initial commands to execute when connecting to a DSN alias.
+[alias_dsn.init-commands]
+# example_dsn = "SET search_path TO otherschema; SET timezone TO 'UTC'"
+
 # Format for number representation
 # for decimal "d" - 12345678, ",d" - 12,345,678
 # for float "g" - 123456.78, ",g" - 123,456.78
@@ -249,3 +258,12 @@ float = ""
 [column_date_formats]
 # use strftime format, e.g.
 # created = "%Y-%m-%d"
+
+# Per host ssh tunnel configuration
+[ssh tunnels]
+# ^example.*\.host$ = myuser:mypasswd@my.tunnel.com:4000
+# .*\.net = another.tunnel.com
+
+# Per dsn_alias ssh tunnel configuration
+[dsn ssh tunnels]
+# ^example_dsn$  = myuser:mypasswd@my.tunnel.com:4000
diff -pruN 4.3.0-3/pgcli/pgcompleter.py 4.4.0-1/pgcli/pgcompleter.py
--- 4.3.0-3/pgcli/pgcompleter.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pgcompleter.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,7 +1,7 @@
 import json
 import logging
 import re
-from itertools import count, repeat, chain
+from itertools import count, chain
 import operator
 from collections import namedtuple, defaultdict, OrderedDict
 from cli_helpers.tabular_output import TabularOutputFormatter
@@ -32,7 +32,6 @@ from .packages.parseutils.utils import l
 from .packages.parseutils.tables import TableReference
 from .packages.pgliterals.main import get_literals
 from .packages.prioritization import PrevalenceCounter
-from .config import load_config, config_location
 
 _logger = logging.getLogger(__name__)
 
@@ -48,18 +47,16 @@ def SchemaObject(name, schema=None, meta
 _Candidate = namedtuple("Candidate", "completion prio meta synonyms prio2 display")
 
 
-def Candidate(
-    completion, prio=None, meta=None, synonyms=None, prio2=None, display=None
-):
-    return _Candidate(
-        completion, prio, meta, synonyms or [completion], prio2, display or completion
-    )
+def Candidate(completion, prio=None, meta=None, synonyms=None, prio2=None, display=None):
+    return _Candidate(completion, prio, meta, synonyms or [completion], prio2, display or completion)
 
 
 # Used to strip trailing '::some_type' from default-value expressions
 arg_default_type_strip_regex = re.compile(r"::[\w\.]+(\[\])?$")
 
-normalize_ref = lambda ref: ref if ref[0] == '"' else '"' + ref.lower() + '"'
+
+def normalize_ref(ref):
+    return ref if ref[0] == '"' else '"' + ref.lower() + '"'
 
 
 def generate_alias(tbl, alias_map=None):
@@ -77,10 +74,7 @@ def generate_alias(tbl, alias_map=None):
     """
     if alias_map and tbl in alias_map:
         return alias_map[tbl]
-    return "".join(
-        [l for l in tbl if l.isupper()]
-        or [l for l, prev in zip(tbl, "_" + tbl) if prev == "_" and l != "_"]
-    )
+    return "".join([l for l in tbl if l.isupper()] or [l for l, prev in zip(tbl, "_" + tbl) if prev == "_" and l != "_"])
 
 
 class InvalidMapFile(ValueError):
@@ -92,9 +86,7 @@ def load_alias_map_file(path):
         with open(path) as fo:
             alias_map = json.load(fo)
     except FileNotFoundError as err:
-        raise InvalidMapFile(
-            f"Cannot read alias_map_file - {err.filename} does not exist"
-        )
+        raise InvalidMapFile(f"Cannot read alias_map_file - {err.filename} does not exist")
     except json.JSONDecodeError:
         raise InvalidMapFile(f"Cannot read alias_map_file - {path} is not valid json")
     else:
@@ -116,15 +108,9 @@ class PGCompleter(Completer):
         self.pgspecial = pgspecial
         self.prioritizer = PrevalenceCounter()
         settings = settings or {}
-        self.signature_arg_style = settings.get(
-            "signature_arg_style", "{arg_name} {arg_type}"
-        )
-        self.call_arg_style = settings.get(
-            "call_arg_style", "{arg_name: <{max_arg_len}} := {arg_default}"
-        )
-        self.call_arg_display_style = settings.get(
-            "call_arg_display_style", "{arg_name}"
-        )
+        self.signature_arg_style = settings.get("signature_arg_style", "{arg_name} {arg_type}")
+        self.call_arg_style = settings.get("call_arg_style", "{arg_name: <{max_arg_len}} := {arg_default}")
+        self.call_arg_display_style = settings.get("call_arg_display_style", "{arg_name}")
         self.call_arg_oneliner_max = settings.get("call_arg_oneliner_max", 2)
         self.search_path_filter = settings.get("search_path_filter")
         self.generate_aliases = settings.get("generate_aliases")
@@ -135,16 +121,11 @@ class PGCompleter(Completer):
             self.alias_map = None
         self.casing_file = settings.get("casing_file")
         self.insert_col_skip_patterns = [
-            re.compile(pattern)
-            for pattern in settings.get(
-                "insert_col_skip_patterns", [r"^now\(\)$", r"^nextval\("]
-            )
+            re.compile(pattern) for pattern in settings.get("insert_col_skip_patterns", [r"^now\(\)$", r"^nextval\("])
         ]
         self.generate_casing_file = settings.get("generate_casing_file")
         self.qualify_columns = settings.get("qualify_columns", "if_more_than_one_table")
-        self.asterisk_column_order = settings.get(
-            "asterisk_column_order", "table_order"
-        )
+        self.asterisk_column_order = settings.get("asterisk_column_order", "table_order")
 
         keyword_casing = settings.get("keyword_casing", "upper").lower()
         if keyword_casing not in ("upper", "lower", "auto"):
@@ -160,11 +141,7 @@ class PGCompleter(Completer):
         self.all_completions = set(self.keywords + self.functions)
 
     def escape_name(self, name):
-        if name and (
-            (not self.name_pattern.match(name))
-            or (name.upper() in self.reserved_words)
-            or (name.upper() in self.functions)
-        ):
+        if name and ((not self.name_pattern.match(name)) or (name.upper() in self.reserved_words) or (name.upper() in self.functions)):
             name = '"%s"' % name
 
         return name
@@ -230,9 +207,7 @@ class PGCompleter(Completer):
             try:
                 metadata[schema][relname] = OrderedDict()
             except KeyError:
-                _logger.error(
-                    "%r %r listed in unrecognized schema %r", kind, relname, schema
-                )
+                _logger.error("%r %r listed in unrecognized schema %r", kind, relname, schema)
             self.all_completions.add(relname)
 
     def extend_columns(self, column_data, kind):
@@ -306,9 +281,7 @@ class PGCompleter(Completer):
             childcol, parcol = e([fk.childcolumn, fk.parentcolumn])
             childcolmeta = meta[childschema][childtable][childcol]
             parcolmeta = meta[parentschema][parenttable][parcol]
-            fk = ForeignKey(
-                parentschema, parenttable, parcol, childschema, childtable, childcol
-            )
+            fk = ForeignKey(parentschema, parenttable, parcol, childschema, childtable, childcol)
             childcolmeta.foreignkeys.append(fk)
             parcolmeta.foreignkeys.append(fk)
 
@@ -452,12 +425,7 @@ class PGCompleter(Completer):
                 # We also use the unescape_name to make sure quoted names have
                 # the same priority as unquoted names.
                 lexical_priority = (
-                    tuple(
-                        0 if c in " _" else -ord(c)
-                        for c in self.unescape_name(item.lower())
-                    )
-                    + (1,)
-                    + tuple(c for c in item)
+                    tuple(0 if c in " _" else -ord(c) for c in self.unescape_name(item.lower())) + (1,) + tuple(c for c in item)
                 )
 
                 item = self.case(item)
@@ -495,9 +463,7 @@ class PGCompleter(Completer):
         # If smart_completion is off then match any word that starts with
         # 'word_before_cursor'.
         if not smart_completion:
-            matches = self.find_matches(
-                word_before_cursor, self.all_completions, mode="strict"
-            )
+            matches = self.find_matches(word_before_cursor, self.all_completions, mode="strict")
             completions = [m.completion for m in matches]
             return sorted(completions, key=operator.attrgetter("text"))
 
@@ -528,9 +494,10 @@ class PGCompleter(Completer):
                 "if_more_than_one_table": len(tables) > 1,
             }[self.qualify_columns]
         )
-        qualify = lambda col, tbl: (
-            (tbl + "." + self.case(col)) if do_qualify else self.case(col)
-        )
+
+        def qualify(col, tbl):
+            return (tbl + "." + self.case(col)) if do_qualify else self.case(col)
+
         _logger.debug("Completion column scope: %r", tables)
         scoped_cols = self.populate_scoped_cols(tables, suggestion.local_tables)
 
@@ -539,61 +506,38 @@ class PGCompleter(Completer):
             return Candidate(qualify(name, ref), 0, "column", synonyms)
 
         def flat_cols():
-            return [
-                make_cand(c.name, t.ref)
-                for t, cols in scoped_cols.items()
-                for c in cols
-            ]
+            return [make_cand(c.name, t.ref) for t, cols in scoped_cols.items() for c in cols]
 
         if suggestion.require_last_table:
             # require_last_table is used for 'tb11 JOIN tbl2 USING (...' which should
             # suggest only columns that appear in the last table and one more
             ltbl = tables[-1].ref
-            other_tbl_cols = {
-                c.name for t, cs in scoped_cols.items() if t.ref != ltbl for c in cs
-            }
-            scoped_cols = {
-                t: [col for col in cols if col.name in other_tbl_cols]
-                for t, cols in scoped_cols.items()
-                if t.ref == ltbl
-            }
+            other_tbl_cols = {c.name for t, cs in scoped_cols.items() if t.ref != ltbl for c in cs}
+            scoped_cols = {t: [col for col in cols if col.name in other_tbl_cols] for t, cols in scoped_cols.items() if t.ref == ltbl}
         lastword = last_word(word_before_cursor, include="most_punctuations")
         if lastword == "*":
             if suggestion.context == "insert":
 
-                def filter(col):
+                def _filter(col):
                     if not col.has_default:
                         return True
-                    return not any(
-                        p.match(col.default) for p in self.insert_col_skip_patterns
-                    )
+                    return not any(p.match(col.default) for p in self.insert_col_skip_patterns)
 
-                scoped_cols = {
-                    t: [col for col in cols if filter(col)]
-                    for t, cols in scoped_cols.items()
-                }
+                scoped_cols = {t: [col for col in cols if _filter(col)] for t, cols in scoped_cols.items()}
             if self.asterisk_column_order == "alphabetic":
                 for cols in scoped_cols.values():
                     cols.sort(key=operator.attrgetter("name"))
-            if (
-                lastword != word_before_cursor
-                and len(tables) == 1
-                and word_before_cursor[-len(lastword) - 1] == "."
-            ):
+            if lastword != word_before_cursor and len(tables) == 1 and word_before_cursor[-len(lastword) - 1] == ".":
                 # User typed x.*; replicate "x." for all columns except the
                 # first, which gets the original (as we only replace the "*"")
                 sep = ", " + word_before_cursor[:-1]
                 collist = sep.join(self.case(c.completion) for c in flat_cols())
             else:
-                collist = ", ".join(
-                    qualify(c.name, t.ref) for t, cs in scoped_cols.items() for c in cs
-                )
+                collist = ", ".join(qualify(c.name, t.ref) for t, cs in scoped_cols.items() for c in cs)
 
             return [
                 Match(
-                    completion=Completion(
-                        collist, -1, display_meta="columns", display="*"
-                    ),
+                    completion=Completion(collist, -1, display_meta="columns", display="*"),
                     priority=(1, 1, 1),
                 )
             ]
@@ -627,12 +571,7 @@ class PGCompleter(Completer):
         other_tbls = {(t.schema, t.name) for t in list(cols)[:-1]}
         joins = []
         # Iterate over FKs in existing tables to find potential joins
-        fks = (
-            (fk, rtbl, rcol)
-            for rtbl, rcols in cols.items()
-            for rcol in rcols
-            for fk in rcol.foreignkeys
-        )
+        fks = ((fk, rtbl, rcol) for rtbl, rcols in cols.items() for rcol in rcols for fk in rcol.foreignkeys)
         col = namedtuple("col", "schema tbl col")
         for fk, rtbl, rcol in fks:
             right = col(rtbl.schema, rtbl.name, rcol.name)
@@ -644,31 +583,21 @@ class PGCompleter(Completer):
             c = self.case
             if self.generate_aliases or normalize_ref(left.tbl) in refs:
                 lref = self.alias(left.tbl, suggestion.table_refs)
-                join = "{0} {4} ON {4}.{1} = {2}.{3}".format(
-                    c(left.tbl), c(left.col), rtbl.ref, c(right.col), lref
-                )
+                join = "{0} {4} ON {4}.{1} = {2}.{3}".format(c(left.tbl), c(left.col), rtbl.ref, c(right.col), lref)
             else:
-                join = "{0} ON {0}.{1} = {2}.{3}".format(
-                    c(left.tbl), c(left.col), rtbl.ref, c(right.col)
-                )
+                join = "{0} ON {0}.{1} = {2}.{3}".format(c(left.tbl), c(left.col), rtbl.ref, c(right.col))
             alias = generate_alias(self.case(left.tbl), alias_map=self.alias_map)
             synonyms = [
                 join,
-                "{0} ON {0}.{1} = {2}.{3}".format(
-                    alias, c(left.col), rtbl.ref, c(right.col)
-                ),
+                "{0} ON {0}.{1} = {2}.{3}".format(alias, c(left.col), rtbl.ref, c(right.col)),
             ]
             # Schema-qualify if (1) new table in same schema as old, and old
             # is schema-qualified, or (2) new in other schema, except public
             if not suggestion.schema and (
-                qualified[normalize_ref(rtbl.ref)]
-                and left.schema == right.schema
-                or left.schema not in (right.schema, "public")
+                qualified[normalize_ref(rtbl.ref)] and left.schema == right.schema or left.schema not in (right.schema, "public")
             ):
                 join = left.schema + "." + join
-            prio = ref_prio[normalize_ref(rtbl.ref)] * 2 + (
-                0 if (left.schema, left.tbl) in other_tbls else 1
-            )
+            prio = ref_prio[normalize_ref(rtbl.ref)] * 2 + (0 if (left.schema, left.tbl) in other_tbls else 1)
             joins.append(Candidate(join, prio, "join", synonyms=synonyms))
 
         return self.find_matches(word_before_cursor, joins, meta="join")
@@ -701,9 +630,7 @@ class PGCompleter(Completer):
         # Tables that are closer to the cursor get higher prio
         ref_prio = {tbl.ref: num for num, tbl in enumerate(suggestion.table_refs)}
         # Map (schema, table, col) to tables
-        coldict = list_dict(
-            ((t.schema, t.name, c.name), t) for t, c in cols if t.ref != lref
-        )
+        coldict = list_dict(((t.schema, t.name, c.name), t) for t, c in cols if t.ref != lref)
         # For each fk from the left table, generate a join condition if
         # the other table is also in the scope
         fks = ((fk, lcol.name) for lcol in lcols for fk in lcol.foreignkeys)
@@ -734,24 +661,16 @@ class PGCompleter(Completer):
                     not f.is_aggregate
                     and not f.is_window
                     and not f.is_extension
-                    and (
-                        f.is_public
-                        or f.schema_name in self.search_path
-                        or f.schema_name == suggestion.schema
-                    )
+                    and (f.is_public or f.schema_name in self.search_path or f.schema_name == suggestion.schema)
                 )
 
         else:
             alias = False
 
             def filt(f):
-                return not f.is_extension and (
-                    f.is_public or f.schema_name == suggestion.schema
-                )
+                return not f.is_extension and (f.is_public or f.schema_name == suggestion.schema)
 
-        arg_mode = {"signature": "signature", "special": None}.get(
-            suggestion.usage, "call"
-        )
+        arg_mode = {"signature": "signature", "special": None}.get(suggestion.usage, "call")
 
         # Function overloading means we way have multiple functions of the same
         # name at this point, so keep unique names only
@@ -762,9 +681,7 @@ class PGCompleter(Completer):
 
         if not suggestion.schema and not suggestion.usage:
             # also suggest hardcoded functions using startswith matching
-            predefined_funcs = self.find_matches(
-                word_before_cursor, self.functions, mode="strict", meta="function"
-            )
+            predefined_funcs = self.find_matches(word_before_cursor, self.functions, mode="strict", meta="function")
             matches.extend(predefined_funcs)
 
         return matches
@@ -815,10 +732,7 @@ class PGCompleter(Completer):
             return "()"
         multiline = usage == "call" and len(args) > self.call_arg_oneliner_max
         max_arg_len = max(len(a.name) for a in args) if multiline else 0
-        args = (
-            self._format_arg(template, arg, arg_num + 1, max_arg_len)
-            for arg_num, arg in enumerate(args)
-        )
+        args = (self._format_arg(template, arg, arg_num + 1, max_arg_len) for arg_num, arg in enumerate(args))
         if multiline:
             return "(" + ",".join("\n    " + a for a in args if a) + "\n)"
         else:
@@ -917,15 +831,11 @@ class PGCompleter(Completer):
         else:
             keywords = [k.lower() for k in keywords]
 
-        return self.find_matches(
-            word_before_cursor, keywords, mode="strict", meta="keyword"
-        )
+        return self.find_matches(word_before_cursor, keywords, mode="strict", meta="keyword")
 
     def get_path_matches(self, _, word_before_cursor):
         completer = PathCompleter(expanduser=True)
-        document = Document(
-            text=word_before_cursor, cursor_position=len(word_before_cursor)
-        )
+        document = Document(text=word_before_cursor, cursor_position=len(word_before_cursor))
         for c in completer.get_completions(document, None):
             yield Match(completion=c, priority=(0,))
 
@@ -946,18 +856,12 @@ class PGCompleter(Completer):
 
         if not suggestion.schema:
             # Also suggest hardcoded types
-            matches.extend(
-                self.find_matches(
-                    word_before_cursor, self.datatypes, mode="strict", meta="datatype"
-                )
-            )
+            matches.extend(self.find_matches(word_before_cursor, self.datatypes, mode="strict", meta="datatype"))
 
         return matches
 
     def get_namedquery_matches(self, _, word_before_cursor):
-        return self.find_matches(
-            word_before_cursor, NamedQueries.instance.list(), meta="named query"
-        )
+        return self.find_matches(word_before_cursor, NamedQueries.instance.list(), meta="named query")
 
     suggestion_matchers = {
         FromClauseItem: get_from_clause_item_matches,
@@ -1047,9 +951,7 @@ class PGCompleter(Completer):
         """
 
         return [
-            SchemaObject(
-                name=obj, schema=(self._maybe_schema(schema=sch, parent=schema))
-            )
+            SchemaObject(name=obj, schema=(self._maybe_schema(schema=sch, parent=schema)))
             for sch in self._get_schemas(obj_type, schema)
             for obj in self.dbmetadata[obj_type][sch].keys()
         ]
diff -pruN 4.3.0-3/pgcli/pgexecute.py 4.4.0-1/pgcli/pgexecute.py
--- 4.3.0-3/pgcli/pgexecute.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pgexecute.py	2025-12-24 23:39:20.000000000 +0000
@@ -13,9 +13,7 @@ from .packages.parseutils.meta import Fu
 
 _logger = logging.getLogger(__name__)
 
-ViewDef = namedtuple(
-    "ViewDef", "nspname relname relkind viewdef reloptions checkoption"
-)
+ViewDef = namedtuple("ViewDef", "nspname relname relkind viewdef reloptions checkoption")
 
 
 # we added this funcion to strip beginning comments
@@ -51,9 +49,7 @@ def register_typecasters(connection):
         "json",
         "jsonb",
     ]:
-        connection.adapters.register_loader(
-            forced_text_type, psycopg.types.string.TextLoader
-        )
+        connection.adapters.register_loader(forced_text_type, psycopg.types.string.TextLoader)
 
 
 # pg3: I don't know what is this
@@ -219,9 +215,7 @@ class PGExecute:
             new_params = {"dsn": new_params["dsn"], "password": new_params["password"]}
 
             if new_params["password"]:
-                new_params["dsn"] = make_conninfo(
-                    new_params["dsn"], password=new_params.pop("password")
-                )
+                new_params["dsn"] = make_conninfo(new_params["dsn"], password=new_params.pop("password"))
 
         conn_params.update({k: v for k, v in new_params.items() if v})
 
@@ -262,11 +256,7 @@ class PGExecute:
         self.extra_args = kwargs
 
         if not self.host:
-            self.host = (
-                "pgbouncer"
-                if self.is_virtual_database()
-                else self.get_socket_directory()
-            )
+            self.host = "pgbouncer" if self.is_virtual_database() else self.get_socket_directory()
 
         self.pid = conn.info.backend_pid
         self.superuser = conn.info.parameter_status("is_superuser") in ("on", "1")
@@ -306,10 +296,7 @@ class PGExecute:
 
     def valid_transaction(self):
         status = self.conn.info.transaction_status
-        return (
-            status == psycopg.pq.TransactionStatus.ACTIVE
-            or status == psycopg.pq.TransactionStatus.INTRANS
-        )
+        return status == psycopg.pq.TransactionStatus.ACTIVE or status == psycopg.pq.TransactionStatus.INTRANS
 
     def run(
         self,
@@ -649,9 +636,7 @@ class PGExecute:
 
     def get_socket_directory(self):
         with self.conn.cursor() as cur:
-            _logger.debug(
-                "Socket directory Query. sql: %r", self.socket_directory_query
-            )
+            _logger.debug("Socket directory Query. sql: %r", self.socket_directory_query)
             cur.execute(self.socket_directory_query)
             result = cur.fetchone()
             return result[0] if result else ""
@@ -889,8 +874,6 @@ class PGExecute:
             return cur.fetchone()[0]
 
     def set_timezone(self, timezone: str):
-        query = psycopg.sql.SQL("set time zone {}").format(
-            psycopg.sql.Identifier(timezone)
-        )
+        query = psycopg.sql.SQL("set time zone {}").format(psycopg.sql.Identifier(timezone))
         with self.conn.cursor() as cur:
             cur.execute(query)
diff -pruN 4.3.0-3/pgcli/pgstyle.py 4.4.0-1/pgcli/pgstyle.py
--- 4.3.0-3/pgcli/pgstyle.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pgstyle.py	2025-12-24 23:39:20.000000000 +0000
@@ -87,9 +87,7 @@ def style_factory(name, cli_style):
             prompt_styles.append((token, cli_style[token]))
 
     override_style = Style([("bottom-toolbar", "noreverse")])
-    return merge_styles(
-        [style_from_pygments_cls(style), override_style, Style(prompt_styles)]
-    )
+    return merge_styles([style_from_pygments_cls(style), override_style, Style(prompt_styles)])
 
 
 def style_factory_output(name, cli_style):
diff -pruN 4.3.0-3/pgcli/pgtoolbar.py 4.4.0-1/pgcli/pgtoolbar.py
--- 4.3.0-3/pgcli/pgtoolbar.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pgtoolbar.py	2025-12-24 23:39:20.000000000 +0000
@@ -37,14 +37,10 @@ def create_toolbar_tokens_func(pgcli):
             if pgcli.multiline_mode == "safe":
                 result.append(("class:bottom-toolbar", " ([Esc] [Enter] to execute]) "))
             else:
-                result.append(
-                    ("class:bottom-toolbar", " (Semi-colon [;] will end the line) ")
-                )
+                result.append(("class:bottom-toolbar", " (Semi-colon [;] will end the line) "))
 
         if pgcli.vi_mode:
-            result.append(
-                ("class:bottom-toolbar", "[F4] Vi-mode (" + _get_vi_mode() + ")  ")
-            )
+            result.append(("class:bottom-toolbar", "[F4] Vi-mode (" + _get_vi_mode() + ")  "))
         else:
             result.append(("class:bottom-toolbar", "[F4] Emacs-mode  "))
 
@@ -54,14 +50,10 @@ def create_toolbar_tokens_func(pgcli):
             result.append(("class:bottom-toolbar", "[F5] Explain: OFF "))
 
         if pgcli.pgexecute.failed_transaction():
-            result.append(
-                ("class:bottom-toolbar.transaction.failed", "     Failed transaction")
-            )
+            result.append(("class:bottom-toolbar.transaction.failed", "     Failed transaction"))
 
         if pgcli.pgexecute.valid_transaction():
-            result.append(
-                ("class:bottom-toolbar.transaction.valid", "     Transaction")
-            )
+            result.append(("class:bottom-toolbar.transaction.valid", "     Transaction"))
 
         if pgcli.completion_refresher.is_refreshing():
             result.append(("class:bottom-toolbar", "     Refreshing completions..."))
diff -pruN 4.3.0-3/pgcli/pyev.py 4.4.0-1/pgcli/pyev.py
--- 4.3.0-3/pgcli/pyev.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pgcli/pyev.py	2025-12-24 23:39:20.000000000 +0000
@@ -99,17 +99,13 @@ class Visualizer:
             return plan
 
         if plan["Plan Rows"] != 0:
-            plan["Planner Row Estimate Factor"] = (
-                plan["Actual Rows"] / plan["Plan Rows"]
-            )
+            plan["Planner Row Estimate Factor"] = plan["Actual Rows"] / plan["Plan Rows"]
 
         if plan["Planner Row Estimate Factor"] < 10:
             plan["Planner Row Estimate Factor"] = 0
             plan["Planner Row Estimate Direction"] = "Over"
             if plan["Actual Rows"] != 0:
-                plan["Planner Row Estimate Factor"] = (
-                    plan["Plan Rows"] / plan["Actual Rows"]
-                )
+                plan["Planner Row Estimate Factor"] = plan["Plan Rows"] / plan["Actual Rows"]
         return plan
 
     #
@@ -119,9 +115,7 @@ class Visualizer:
 
         for child in plan.get("Plans", []):
             if child["Node Type"] != "CTEScan":
-                plan["Actual Duration"] = (
-                    plan["Actual Duration"] - child["Actual Total Time"]
-                )
+                plan["Actual Duration"] = plan["Actual Duration"] - child["Actual Total Time"]
                 plan["Actual Cost"] = plan["Actual Cost"] - child["Total Cost"]
 
         if plan["Actual Cost"] < 0:
@@ -243,9 +237,7 @@ class Visualizer:
 
     def create_lines(self, plan, prefix, depth, width, last_child):
         current_prefix = prefix
-        self.string_lines.append(
-            self.output_fn(current_prefix, self.prefix_format("│"))
-        )
+        self.string_lines.append(self.output_fn(current_prefix, self.prefix_format("│")))
 
         joint = "├"
         if last_child:
@@ -277,9 +269,7 @@ class Visualizer:
             DESCRIPTIONS.get(plan["Node Type"], "Not found : %s" % plan["Node Type"]),
             cols,
         ):
-            self.string_lines.append(
-                self.output_fn(current_prefix, "%s" % self.muted_format(line))
-            )
+            self.string_lines.append(self.output_fn(current_prefix, "%s" % self.muted_format(line)))
         #
         if plan.get("Actual Duration"):
             self.string_lines.append(
@@ -289,8 +279,7 @@ class Visualizer:
                     % (
                         "Duration:",
                         self.duration_to_string(plan["Actual Duration"]),
-                        (plan["Actual Duration"] / self.explain["Execution Time"])
-                        * 100,
+                        (plan["Actual Duration"] / self.explain["Execution Time"]) * 100,
                     ),
                 )
             )
@@ -361,9 +350,7 @@ class Visualizer:
                     % (
                         self.muted_format("filter"),
                         plan["Filter"],
-                        self.muted_format(
-                            "[-%s rows]" % self.intcomma(plan["Rows Removed by Filter"])
-                        ),
+                        self.muted_format("[-%s rows]" % self.intcomma(plan["Rows Removed by Filter"])),
                     ),
                 )
             )
@@ -377,9 +364,7 @@ class Visualizer:
             )
 
         if plan.get("CTE Name"):
-            self.string_lines.append(
-                self.output_fn(current_prefix, "CTE %s" % plan["CTE Name"])
-            )
+            self.string_lines.append(self.output_fn(current_prefix, "CTE %s" % plan["CTE Name"]))
 
         if plan.get("Planner Row Estimate Factor") != 0:
             self.string_lines.append(
@@ -398,29 +383,22 @@ class Visualizer:
         current_prefix = prefix
 
         if len(plan.get("Output", [])) > 0:
-            for index, line in enumerate(
-                self.wrap_string(" + ".join(plan["Output"]), cols)
-            ):
+            for index, line in enumerate(self.wrap_string(" + ".join(plan["Output"]), cols)):
                 self.string_lines.append(
                     self.output_fn(
                         current_prefix,
-                        self.prefix_format(self.get_terminator(index, plan))
-                        + self.output_format(line),
+                        self.prefix_format(self.get_terminator(index, plan)) + self.output_format(line),
                     )
                 )
 
         for index, nested_plan in enumerate(plan.get("Plans", [])):
-            self.create_lines(
-                nested_plan, prefix, depth + 1, width, index == len(plan["Plans"]) - 1
-            )
+            self.create_lines(nested_plan, prefix, depth + 1, width, index == len(plan["Plans"]) - 1)
 
     def generate_lines(self):
         self.string_lines = [
             "○ Total Cost: %s" % self.intcomma(self.explain["Total Cost"]),
-            "○ Planning Time: %s"
-            % self.duration_to_string(self.explain["Planning Time"]),
-            "○ Execution Time: %s"
-            % self.duration_to_string(self.explain["Execution Time"]),
+            "○ Planning Time: %s" % self.duration_to_string(self.explain["Planning Time"]),
+            "○ Execution Time: %s" % self.duration_to_string(self.explain["Execution Time"]),
             self.prefix_format("┬"),
         ]
         self.create_lines(
diff -pruN 4.3.0-3/pyproject.toml 4.4.0-1/pyproject.toml
--- 4.3.0-3/pyproject.toml	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/pyproject.toml	2025-12-24 23:39:20.000000000 +0000
@@ -25,8 +25,8 @@ urls = { Homepage = "https://pgcli.com"
 requires-python = ">=3.9"
 dependencies = [
     "pgspecial>=2.0.0",
-    "click >= 4.1",
-    "Pygments>=2.0",  # Pygments has to be Capitalcased. WTF?
+    "click >= 4.1,<8.1.8",
+    "Pygments>=2.0",       # Pygments has to be Capitalcased.
     # We still need to use pt-2 unless pt-3 released on Fedora32
     # see: https://github.com/dbcli/pgcli/pull/1197
     "prompt_toolkit>=2.0.6,<4.0.0",
@@ -51,11 +51,25 @@ pgcli = "pgcli.main:cli"
 [project.optional-dependencies]
 keyring = ["keyring >= 12.2.0"]
 sshtunnel = ["sshtunnel >= 0.4.0"]
+dev = [
+    "behave>=1.2.4",
+    "coverage>=7.2.7",
+    "docutils>=0.13.1",
+    "keyrings.alt>=3.1",
+    "pexpect>=4.9.0; platform_system != 'Windows'",
+    "pytest>=7.4.4",
+    "pytest-cov>=4.1.0",
+    "ruff>=0.11.7",
+    "sshtunnel>=0.4.0",
+    "tox>=1.9.2",
+]
 
 [build-system]
-requires = ["setuptools>=61.2"]
+requires = ["setuptools>=64.0", "setuptools-scm>=8"]
 build-backend = "setuptools.build_meta"
 
+[tool.setuptools_scm]
+
 [tool.setuptools]
 include-package-data = false
 
@@ -66,29 +80,56 @@ version = { attr = "pgcli.__version__" }
 find = { namespaces = false }
 
 [tool.setuptools.package-data]
-pgcli = [
-    "pgclirc",
-    "packages/pgliterals/pgliterals.json",
-]
-
-[tool.black]
-line-length = 88
-target-version = ['py38']
-include = '\.pyi?$'
-exclude = '''
-/(
-    \.eggs
-  | \.git
-  | \.hg
-  | \.mypy_cache
-  | \.tox
-  | \.venv
-  | \.cache
-  | \.pytest_cache
-  | _build
-  | buck-out
-  | build
-  | dist
-  | tests/data
-)/
-'''
+pgcli = ["pgclirc", "packages/pgliterals/pgliterals.json"]
+
+[tool.ruff]
+target-version = 'py39'
+line-length = 140
+
+[tool.ruff.lint]
+select = [
+    'A',
+#   'I',      # todo enableme imports
+    'E',
+    'W',
+    'F',
+    'C4',
+    'PIE',
+    'TID',
+]
+ignore = [
+    'E401',   # Multiple imports on one line
+    'E402',   # Module level import not at top of file
+    'PIE808', # range() starting with 0
+    # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules
+    'E111',   # indentation-with-invalid-multiple
+    'E114',   # indentation-with-invalid-multiple-comment
+    'E117',   # over-indented
+    'W191',   # tab-indentation
+    'E741',   # ambiguous-variable-name
+    # TODO
+    'PIE796', # todo enableme Enum contains duplicate value
+]
+exclude = [
+    'pgcli/magic.py',
+    'pgcli/pyev.py',
+]
+
+[tool.ruff.lint.isort]
+force-sort-within-sections = true
+known-first-party = [
+    'pgcli',
+    'tests',
+]
+
+[tool.ruff.format]
+preview = true
+quote-style = 'preserve'
+exclude = [
+    'build',
+]
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+addopts = "--capture=sys --showlocals -rxs"
+testpaths = ["tests"]
\ No newline at end of file
diff -pruN 4.3.0-3/release.py 4.4.0-1/release.py
--- 4.3.0-3/release.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/release.py	2025-12-24 23:39:20.000000000 +0000
@@ -45,9 +45,7 @@ def run_step(*args):
 
 
 def version(version_file):
-    _version_re = re.compile(
-        r'__version__\s+=\s+(?P<quote>[\'"])(?P<version>.*)(?P=quote)'
-    )
+    _version_re = re.compile(r'__version__\s+=\s+(?P<quote>[\'"])(?P<version>.*)(?P=quote)')
 
     with io.open(version_file, encoding="utf-8") as f:
         ver = _version_re.search(f.read()).group("version")
@@ -108,9 +106,7 @@ if __name__ == "__main__":
         action="store_true",
         dest="confirm_steps",
         default=False,
-        help=(
-            "Confirm every step. If the step is not " "confirmed, it will be skipped."
-        ),
+        help=("Confirm every step. If the step is not confirmed, it will be skipped."),
     )
     parser.add_option(
         "-d",
diff -pruN 4.3.0-3/requirements-dev.txt 4.4.0-1/requirements-dev.txt
--- 4.3.0-3/requirements-dev.txt	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/requirements-dev.txt	1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-pytest>=2.7.0
-tox>=1.9.2
-behave>=1.2.4
-black>=23.3.0
-pexpect==3.3; platform_system != "Windows"
-pre-commit>=1.16.0
-coverage>=5.0.4
-codecov>=1.5.1
-docutils>=0.13.1
-autopep8>=1.3.3
-twine>=1.11.0
-wheel>=0.33.6
-sshtunnel>=0.4.0
diff -pruN 4.3.0-3/tests/features/db_utils.py 4.4.0-1/tests/features/db_utils.py
--- 4.3.0-3/tests/features/db_utils.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/features/db_utils.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,9 +1,7 @@
 from psycopg import connect
 
 
-def create_db(
-    hostname="localhost", username=None, password=None, dbname=None, port=None
-):
+def create_db(hostname="localhost", username=None, password=None, dbname=None, port=None):
     """Create test database.
 
     :param hostname: string
@@ -36,9 +34,7 @@ def create_cn(hostname, password, userna
     :param dbname: string
     :return: psycopg2.connection
     """
-    cn = connect(
-        host=hostname, user=username, dbname=dbname, password=password, port=port
-    )
+    cn = connect(host=hostname, user=username, dbname=dbname, password=password, port=port)
 
     print(f"Created connection: {cn.info.get_parameters()}.")
     return cn
@@ -49,7 +45,7 @@ def pgbouncer_available(hostname="localh
     try:
         cn = create_cn(hostname, password, username, "pgbouncer", 6432)
         return True
-    except:
+    except Exception:
         print("Pgbouncer is not available.")
     finally:
         if cn:
diff -pruN 4.3.0-3/tests/features/environment.py 4.4.0-1/tests/features/environment.py
--- 4.3.0-3/tests/features/environment.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/features/environment.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,13 +1,13 @@
 import copy
 import os
+import shutil
+import signal
 import sys
+import tempfile
+
 import db_utils as dbutils
 import fixture_utils as fixutils
 import pexpect
-import tempfile
-import shutil
-import signal
-
 
 from steps import wrappers
 
@@ -22,17 +22,13 @@ def before_all(context):
     os.environ["VISUAL"] = "ex"
     os.environ["PROMPT_TOOLKIT_NO_CPR"] = "1"
 
-    context.package_root = os.path.abspath(
-        os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
-    )
+    context.package_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
     fixture_dir = os.path.join(context.package_root, "tests/features/fixture_data")
 
     print("package root:", context.package_root)
     print("fixture dir:", fixture_dir)
 
-    os.environ["COVERAGE_PROCESS_START"] = os.path.join(
-        context.package_root, ".coveragerc"
-    )
+    os.environ["COVERAGE_PROCESS_START"] = os.path.join(context.package_root, ".coveragerc")
 
     context.exit_sent = False
 
@@ -42,30 +38,20 @@ def before_all(context):
 
     # Store get params from config.
     context.conf = {
-        "host": context.config.userdata.get(
-            "pg_test_host", os.getenv("PGHOST", "localhost")
-        ),
-        "user": context.config.userdata.get(
-            "pg_test_user", os.getenv("PGUSER", "postgres")
-        ),
-        "pass": context.config.userdata.get(
-            "pg_test_pass", os.getenv("PGPASSWORD", None)
-        ),
-        "port": context.config.userdata.get(
-            "pg_test_port", os.getenv("PGPORT", "5432")
-        ),
+        "host": context.config.userdata.get("pg_test_host", os.getenv("PGHOST", "localhost")),
+        "user": context.config.userdata.get("pg_test_user", os.getenv("PGUSER", "postgres")),
+        "pass": context.config.userdata.get("pg_test_pass", os.getenv("PGPASSWORD", None)),
+        "port": context.config.userdata.get("pg_test_port", os.getenv("PGPORT", "5432")),
         "cli_command": (
             context.config.userdata.get("pg_cli_command", None)
             or '{python} -c "{startup}"'.format(
                 python=sys.executable,
-                startup="; ".join(
-                    [
-                        "import coverage",
-                        "coverage.process_startup()",
-                        "import pgcli.main",
-                        "pgcli.main.cli(auto_envvar_prefix='BEHAVE')",
-                    ]
-                ),
+                startup="; ".join([
+                    "import coverage",
+                    "coverage.process_startup()",
+                    "import pgcli.main",
+                    "pgcli.main.cli(auto_envvar_prefix='BEHAVE')",
+                ]),
             )
         ),
         "dbname": db_name_full,
@@ -165,15 +151,16 @@ def before_step(context, _):
 
 
 def is_known_problem(scenario):
-    """TODO: why is this not working in 3.12?"""
-    if sys.version_info >= (3, 12):
-        return scenario.name in (
-            'interrupt current query via "ctrl + c"',
-            "run the cli with --username",
-            "run the cli with --user",
-            "run the cli with --port",
-        )
-    return False
+    """TODO: can we fix this?"""
+    return scenario.name in (
+        'interrupt current query via "ctrl + c"',
+        "run the cli with --username",
+        "run the cli with --user",
+        "run the cli with --port",
+        "confirm exit when a transaction is ongoing",
+        "cancel exit when a transaction is ongoing",
+        "run the cli and exit",
+    )
 
 
 def before_scenario(context, scenario):
diff -pruN 4.3.0-3/tests/features/steps/basic_commands.py 4.4.0-1/tests/features/steps/basic_commands.py
--- 4.3.0-3/tests/features/steps/basic_commands.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/features/steps/basic_commands.py	2025-12-24 23:39:20.000000000 +0000
@@ -36,7 +36,7 @@ def step_ping_database(context):
 def step_get_pong_response(context):
     # exit code 0 is implied by the presence of cmd_output here, which
     # is only set on a successful run.
-    assert context.cmd_output.strip() == b"PONG", f"Output was {context.cmd_output}"
+    assert b"PONG" in context.cmd_output.strip(), f"Output was {context.cmd_output}"
 
 
 @when("we run dbcli")
@@ -62,9 +62,7 @@ def step_run_cli_using_arg(context, arg)
         arg = "service=mock_postgres --password"
         prompt_check = False
         currentdb = "postgres"
-    wrappers.run_cli(
-        context, run_args=[arg], prompt_check=prompt_check, currentdb=currentdb
-    )
+    wrappers.run_cli(context, run_args=[arg], prompt_check=prompt_check, currentdb=currentdb)
 
 
 @when("we wait for prompt")
@@ -188,9 +186,7 @@ def step_send_source_command(context):
 
 @when("we run query to check application_name")
 def step_check_application_name(context):
-    context.cli.sendline(
-        "SELECT 'found' FROM pg_stat_activity WHERE application_name = 'pgcli' HAVING COUNT(*) > 0;"
-    )
+    context.cli.sendline("SELECT 'found' FROM pg_stat_activity WHERE application_name = 'pgcli' HAVING COUNT(*) > 0;")
 
 
 @then("we see found")
diff -pruN 4.3.0-3/tests/features/steps/crud_table.py 4.4.0-1/tests/features/steps/crud_table.py
--- 4.3.0-3/tests/features/steps/crud_table.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/features/steps/crud_table.py	2025-12-24 23:39:20.000000000 +0000
@@ -34,9 +34,7 @@ def step_update_table(context):
     """
     Send insert into table.
     """
-    context.cli.sendline(
-        f"""update a set x = '{UPDATED_DATA}' where x = '{INITIAL_DATA}';"""
-    )
+    context.cli.sendline(f"""update a set x = '{UPDATED_DATA}' where x = '{INITIAL_DATA}';""")
 
 
 @when("we select from table")
diff -pruN 4.3.0-3/tests/features/steps/iocommands.py 4.4.0-1/tests/features/steps/iocommands.py
--- 4.3.0-3/tests/features/steps/iocommands.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/features/steps/iocommands.py	2025-12-24 23:39:20.000000000 +0000
@@ -8,15 +8,11 @@ import wrappers
 @when("we start external editor providing a file name")
 def step_edit_file(context):
     """Edit file with external editor."""
-    context.editor_file_name = os.path.join(
-        context.package_root, "test_file_{0}.sql".format(context.conf["vi"])
-    )
+    context.editor_file_name = os.path.join(context.package_root, "test_file_{0}.sql".format(context.conf["vi"]))
     if os.path.exists(context.editor_file_name):
         os.remove(context.editor_file_name)
     context.cli.sendline(r"\e {}".format(os.path.basename(context.editor_file_name)))
-    wrappers.expect_exact(
-        context, 'Entering Ex mode.  Type "visual" to go to Normal mode.', timeout=2
-    )
+    wrappers.expect_exact(context, 'Entering Ex mode.  Type "visual" to go to Normal mode.', timeout=2)
     wrappers.expect_exact(context, ":", timeout=2)
 
 
@@ -48,9 +44,7 @@ def step_edit_done_sql(context):
 
 @when("we tee output")
 def step_tee_ouptut(context):
-    context.tee_file_name = os.path.join(
-        context.package_root, "tee_file_{0}.sql".format(context.conf["vi"])
-    )
+    context.tee_file_name = os.path.join(context.package_root, "tee_file_{0}.sql".format(context.conf["vi"]))
     if os.path.exists(context.tee_file_name):
         os.remove(context.tee_file_name)
     context.cli.sendline(r"\o {}".format(os.path.basename(context.tee_file_name)))
diff -pruN 4.3.0-3/tests/features/steps/wrappers.py 4.4.0-1/tests/features/steps/wrappers.py
--- 4.3.0-3/tests/features/steps/wrappers.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/features/steps/wrappers.py	2025-12-24 23:39:20.000000000 +0000
@@ -1,6 +1,5 @@
 import re
 import pexpect
-from pgcli.main import COLOR_CODE_REGEX
 import textwrap
 
 from io import StringIO
@@ -37,10 +36,7 @@ def expect_exact(context, expected, time
 
 def expect_pager(context, expected, timeout):
     formatted = expected if isinstance(expected, list) else [expected]
-    formatted = [
-        f"{context.conf['pager_boundary']}\r\n{t}{context.conf['pager_boundary']}\r\n"
-        for t in formatted
-    ]
+    formatted = [f"{context.conf['pager_boundary']}\r\n{t}{context.conf['pager_boundary']}\r\n" for t in formatted]
 
     expect_exact(
         context,
diff -pruN 4.3.0-3/tests/formatter/test_sqlformatter.py 4.4.0-1/tests/formatter/test_sqlformatter.py
--- 4.3.0-3/tests/formatter/test_sqlformatter.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/formatter/test_sqlformatter.py	2025-12-24 23:39:20.000000000 +0000
@@ -55,7 +55,7 @@ def test_output_sql_insert():
     }
     formatter.query = 'SELECT * FROM "user";'
     output = adapter(data, header, table_format=table_format, **kwargs)
-    output_list = [l for l in output]
+    output_list = list(output)
     expected = [
         'INSERT INTO "user" ("id", "name", "email", "phone", "description", "created_at", "updated_at") VALUES',
         "  ('1', 'Jackson', 'jackson_test@gmail.com', '132454789', NULL, "
@@ -96,7 +96,7 @@ def test_output_sql_update():
     }
     formatter.query = 'SELECT * FROM "user";'
     output = adapter(data, header, table_format=table_format, **kwargs)
-    output_list = [l for l in output]
+    output_list = list(output)
     print(output_list)
     expected = [
         'UPDATE "user" SET',
diff -pruN 4.3.0-3/tests/metadata.py 4.4.0-1/tests/metadata.py
--- 4.3.0-3/tests/metadata.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/metadata.py	2025-12-24 23:39:20.000000000 +0000
@@ -23,16 +23,12 @@ def completion(display_meta, text, pos=0
 
 
 def function(text, pos=0, display=None):
-    return Completion(
-        text, display=display or text, start_position=pos, display_meta="function"
-    )
+    return Completion(text, display=display or text, start_position=pos, display_meta="function")
 
 
 def get_result(completer, text, position=None):
     position = len(text) if position is None else position
-    return completer.get_completions(
-        Document(text=text, cursor_position=position), Mock()
-    )
+    return completer.get_completions(Document(text=text, cursor_position=position), Mock())
 
 
 def result_set(completer, text, position=None):
@@ -73,10 +69,7 @@ class MetaData:
         return [keyword(kw, pos) for kw in self.completer.keywords_tree.keys()]
 
     def specials(self, pos=0):
-        return [
-            Completion(text=k, start_position=pos, display_meta=v.description)
-            for k, v in self.completer.pgspecial.commands.items()
-        ]
+        return [Completion(text=k, start_position=pos, display_meta=v.description) for k, v in self.completer.pgspecial.commands.items()]
 
     def columns(self, tbl, parent="public", typ="tables", pos=0):
         if typ == "functions":
@@ -87,42 +80,23 @@ class MetaData:
         return [column(escape(col), pos) for col in cols]
 
     def datatypes(self, parent="public", pos=0):
-        return [
-            datatype(escape(x), pos)
-            for x in self.metadata.get("datatypes", {}).get(parent, [])
-        ]
+        return [datatype(escape(x), pos) for x in self.metadata.get("datatypes", {}).get(parent, [])]
 
     def tables(self, parent="public", pos=0):
-        return [
-            table(escape(x), pos)
-            for x in self.metadata.get("tables", {}).get(parent, [])
-        ]
+        return [table(escape(x), pos) for x in self.metadata.get("tables", {}).get(parent, [])]
 
     def views(self, parent="public", pos=0):
-        return [
-            view(escape(x), pos) for x in self.metadata.get("views", {}).get(parent, [])
-        ]
+        return [view(escape(x), pos) for x in self.metadata.get("views", {}).get(parent, [])]
 
     def functions(self, parent="public", pos=0):
         return [
             function(
                 escape(x[0])
                 + "("
-                + ", ".join(
-                    arg_name + " := "
-                    for (arg_name, arg_mode) in zip(x[1], x[3])
-                    if arg_mode in ("b", "i")
-                )
+                + ", ".join(arg_name + " := " for (arg_name, arg_mode) in zip(x[1], x[3]) if arg_mode in ("b", "i"))
                 + ")",
                 pos,
-                escape(x[0])
-                + "("
-                + ", ".join(
-                    arg_name
-                    for (arg_name, arg_mode) in zip(x[1], x[3])
-                    if arg_mode in ("b", "i")
-                )
-                + ")",
+                escape(x[0]) + "(" + ", ".join(arg_name for (arg_name, arg_mode) in zip(x[1], x[3]) if arg_mode in ("b", "i")) + ")",
             )
             for x in self.metadata.get("functions", {}).get(parent, [])
         ]
@@ -132,24 +106,14 @@ class MetaData:
         return [schema(escape(s), pos=pos) for s in schemas]
 
     def functions_and_keywords(self, parent="public", pos=0):
-        return (
-            self.functions(parent, pos)
-            + self.builtin_functions(pos)
-            + self.keywords(pos)
-        )
+        return self.functions(parent, pos) + self.builtin_functions(pos) + self.keywords(pos)
 
     # Note that the filtering parameters here only apply to the columns
     def columns_functions_and_keywords(self, tbl, parent="public", typ="tables", pos=0):
-        return self.functions_and_keywords(pos=pos) + self.columns(
-            tbl, parent, typ, pos
-        )
+        return self.functions_and_keywords(pos=pos) + self.columns(tbl, parent, typ, pos)
 
     def from_clause_items(self, parent="public", pos=0):
-        return (
-            self.functions(parent, pos)
-            + self.views(parent, pos)
-            + self.tables(parent, pos)
-        )
+        return self.functions(parent, pos) + self.views(parent, pos) + self.tables(parent, pos)
 
     def schemas_and_from_clause_items(self, parent="public", pos=0):
         return self.from_clause_items(parent, pos) + self.schemas(pos)
@@ -205,9 +169,7 @@ class MetaData:
         from pgcli.pgcompleter import PGCompleter
         from pgspecial import PGSpecial
 
-        comp = PGCompleter(
-            smart_completion=True, settings=settings, pgspecial=PGSpecial()
-        )
+        comp = PGCompleter(smart_completion=True, settings=settings, pgspecial=PGSpecial())
 
         schemata, tables, tbl_cols, views, view_cols = [], [], [], [], []
 
@@ -226,20 +188,12 @@ class MetaData:
                 view_cols.extend([self._make_col(sch, tbl, col) for col in cols])
 
         functions = [
-            FunctionMetadata(sch, *func_meta, arg_defaults=None)
-            for sch, funcs in metadata["functions"].items()
-            for func_meta in funcs
+            FunctionMetadata(sch, *func_meta, arg_defaults=None) for sch, funcs in metadata["functions"].items() for func_meta in funcs
         ]
 
-        datatypes = [
-            (sch, typ)
-            for sch, datatypes in metadata["datatypes"].items()
-            for typ in datatypes
-        ]
+        datatypes = [(sch, typ) for sch, datatypes in metadata["datatypes"].items() for typ in datatypes]
 
-        foreignkeys = [
-            ForeignKey(*fk) for fks in metadata["foreignkeys"].values() for fk in fks
-        ]
+        foreignkeys = [ForeignKey(*fk) for fks in metadata["foreignkeys"].values() for fk in fks]
 
         comp.extend_schemata(schemata)
         comp.extend_relations(tables, kind="tables")
diff -pruN 4.3.0-3/tests/parseutils/test_function_metadata.py 4.4.0-1/tests/parseutils/test_function_metadata.py
--- 4.3.0-3/tests/parseutils/test_function_metadata.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/parseutils/test_function_metadata.py	2025-12-24 23:39:20.000000000 +0000
@@ -2,15 +2,9 @@ from pgcli.packages.parseutils.meta impo
 
 
 def test_function_metadata_eq():
-    f1 = FunctionMetadata(
-        "s", "f", ["x"], ["integer"], [], "int", False, False, False, False, None
-    )
-    f2 = FunctionMetadata(
-        "s", "f", ["x"], ["integer"], [], "int", False, False, False, False, None
-    )
-    f3 = FunctionMetadata(
-        "s", "g", ["x"], ["integer"], [], "int", False, False, False, False, None
-    )
+    f1 = FunctionMetadata("s", "f", ["x"], ["integer"], [], "int", False, False, False, False, None)
+    f2 = FunctionMetadata("s", "f", ["x"], ["integer"], [], "int", False, False, False, False, None)
+    f3 = FunctionMetadata("s", "g", ["x"], ["integer"], [], "int", False, False, False, False, None)
     assert f1 == f2
     assert f1 != f3
     assert not (f1 != f2)
diff -pruN 4.3.0-3/tests/parseutils/test_parseutils.py 4.4.0-1/tests/parseutils/test_parseutils.py
--- 4.3.0-3/tests/parseutils/test_parseutils.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/parseutils/test_parseutils.py	2025-12-24 23:39:20.000000000 +0000
@@ -19,9 +19,7 @@ def test_simple_select_single_table():
     assert tables == ((None, "abc", None, False),)
 
 
-@pytest.mark.parametrize(
-    "sql", ['select * from "abc"."def"', 'select * from abc."def"']
-)
+@pytest.mark.parametrize("sql", ['select * from "abc"."def"', 'select * from abc."def"'])
 def test_simple_select_single_table_schema_qualified_quoted_table(sql):
     tables = extract_tables(sql)
     assert tables == (("abc", "def", '"def"', False),)
@@ -172,7 +170,7 @@ def test_subselect_tables():
 @pytest.mark.parametrize("text", ["SELECT * FROM foo.", "SELECT 123 AS foo"])
 def test_extract_no_tables(text):
     tables = extract_tables(text)
-    assert tables == tuple()
+    assert tables == ()
 
 
 @pytest.mark.parametrize("arg_list", ["", "arg1", "arg1, arg2, arg3"])
@@ -225,9 +223,7 @@ def test_find_prev_keyword_where(sql):
     assert kw.value == "where" and stripped == "select * from foo where"
 
 
-@pytest.mark.parametrize(
-    "sql", ["create table foo (bar int, baz ", "select * from foo() as bar (baz "]
-)
+@pytest.mark.parametrize("sql", ["create table foo (bar int, baz ", "select * from foo() as bar (baz "])
 def test_find_prev_keyword_open_parens(sql):
     kw, _ = find_prev_keyword(sql)
     assert kw.value == "("
diff -pruN 4.3.0-3/tests/test_application_name.py 4.4.0-1/tests/test_application_name.py
--- 4.3.0-3/tests/test_application_name.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_application_name.py	2025-12-24 23:39:20.000000000 +0000
@@ -10,8 +10,6 @@ def test_application_name_in_env():
     runner = CliRunner()
     app_name = "wonderful_app"
     with patch.object(PGExecute, "__init__") as mock_pgxecute:
-        runner.invoke(
-            cli, ["127.0.0.1:5432/hello", "user"], env={"PGAPPNAME": app_name}
-        )
+        runner.invoke(cli, ["127.0.0.1:5432/hello", "user"], env={"PGAPPNAME": app_name})
         kwargs = mock_pgxecute.call_args.kwargs
         assert kwargs.get("application_name") == app_name
diff -pruN 4.3.0-3/tests/test_auth.py 4.4.0-1/tests/test_auth.py
--- 4.3.0-3/tests/test_auth.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_auth.py	2025-12-24 23:39:20.000000000 +0000
@@ -20,9 +20,7 @@ def test_keyring_get_password_ok():
 
 def test_keyring_get_password_exception():
     with mock.patch("pgcli.auth.keyring", return_value=mock.MagicMock()):
-        with mock.patch(
-            "pgcli.auth.keyring.get_password", side_effect=Exception("Boom!")
-        ):
+        with mock.patch("pgcli.auth.keyring.get_password", side_effect=Exception("Boom!")):
             assert auth.keyring_get_password("test") == ""
 
 
@@ -34,7 +32,5 @@ def test_keyring_set_password_ok():
 
 def test_keyring_set_password_exception():
     with mock.patch("pgcli.auth.keyring", return_value=mock.MagicMock()):
-        with mock.patch(
-            "pgcli.auth.keyring.set_password", side_effect=Exception("Boom!")
-        ):
+        with mock.patch("pgcli.auth.keyring.set_password", side_effect=Exception("Boom!")):
             auth.keyring_set_password("test", "abc123")
diff -pruN 4.3.0-3/tests/test_init_commands_simple.py 4.4.0-1/tests/test_init_commands_simple.py
--- 4.3.0-3/tests/test_init_commands_simple.py	1970-01-01 00:00:00.000000000 +0000
+++ 4.4.0-1/tests/test_init_commands_simple.py	2025-12-24 23:39:20.000000000 +0000
@@ -0,0 +1,94 @@
+import pytest
+from click.testing import CliRunner
+
+from pgcli.main import cli, PGCli
+
+
+@pytest.fixture
+def dummy_exec(monkeypatch, tmp_path):
+    # Capture executed commands
+    # Isolate config directory for tests
+    monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path))
+    dummy_cmds = []
+
+    class DummyExec:
+        def run(self, cmd):
+            # Ignore ping SELECT 1 commands used for exiting CLI
+            if cmd.strip().upper() == "SELECT 1":
+                return []
+            # Record init commands
+            dummy_cmds.append(cmd)
+            return []
+
+        def get_timezone(self):
+            return "UTC"
+
+        def set_timezone(self, *args, **kwargs):
+            pass
+
+    def fake_connect(self, *args, **kwargs):
+        self.pgexecute = DummyExec()
+
+    monkeypatch.setattr(PGCli, "connect", fake_connect)
+    return dummy_cmds
+
+
+def test_init_command_option(dummy_exec):
+    "Test that --init-command triggers execution of the command."
+    runner = CliRunner()
+    # Use a custom init command and --ping to exit the CLI after init commands
+    result = runner.invoke(cli, ["--init-command", "SELECT foo", "--ping", "db", "user"])
+    assert result.exit_code == 0
+    # Should print the init command
+    assert "Running init commands: SELECT foo" in result.output
+    # Should exit via ping
+    assert "PONG" in result.output
+    # DummyExec should have recorded only the init command
+    assert dummy_exec == ["SELECT foo"]
+
+
+def test_init_commands_from_config(dummy_exec, tmp_path):
+    """
+    Test that init commands defined in the config file are executed on startup.
+    """
+    # Create a temporary config file with init-commands
+    config_file = tmp_path / "pgclirc_test"
+    config_file.write_text("[main]\n[init-commands]\nfirst = SELECT foo;\nsecond = SELECT bar;\n")
+
+    runner = CliRunner()
+    # Use --ping to exit the CLI after init commands
+    result = runner.invoke(cli, ["--pgclirc", str(config_file.absolute()), "--ping", "testdb", "user"])
+    assert result.exit_code == 0
+    # Should print both init commands in order (note trailing semicolons cause double ';;')
+    assert "Running init commands: SELECT foo;; SELECT bar;" in result.output
+    # DummyExec should have recorded both commands
+    assert dummy_exec == ["SELECT foo;", "SELECT bar;"]
+
+
+def test_init_commands_option_and_config(dummy_exec, tmp_path):
+    """
+    Test that CLI-provided init command is appended after config-defined commands.
+    """
+    # Create a temporary config file with init-commands
+    config_file = tmp_path / "pgclirc_test"
+    config_file.write_text("[main]\n[init-commands]\nfirst = SELECT foo;\n")
+
+    runner = CliRunner()
+    # Use --ping to exit the CLI after init commands
+    result = runner.invoke(
+        cli,
+        [
+            "--pgclirc",
+            str(config_file),
+            "--init-command",
+            "SELECT baz;",
+            "--ping",
+            "testdb",
+            "user",
+        ],
+    )
+    assert result.exit_code == 0
+    # Should print config command followed by CLI option (double ';' between commands)
+    assert "Running init commands: SELECT foo;; SELECT baz;" in result.output
+    # DummyExec should record both commands in order
+    assert dummy_exec == ["SELECT foo;", "SELECT baz;"]
diff -pruN 4.3.0-3/tests/test_main.py 4.4.0-1/tests/test_main.py
--- 4.3.0-3/tests/test_main.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_main.py	2025-12-24 23:39:20.000000000 +0000
@@ -61,9 +61,7 @@ def test_obfuscate_process_password():
 
 def test_format_output():
     settings = OutputSettings(table_format="psql", dcmlfmt="d", floatfmt="g")
-    results = format_output(
-        "Title", [("abc", "def")], ["head1", "head2"], "test status", settings
-    )
+    results = format_output("Title", [("abc", "def")], ["head1", "head2"], "test status", settings)
     expected = [
         "Title",
         "+-------+-------+",
@@ -128,9 +126,7 @@ def test_no_column_date_formats():
 
 
 def test_format_output_truncate_on():
-    settings = OutputSettings(
-        table_format="psql", dcmlfmt="d", floatfmt="g", max_field_width=10
-    )
+    settings = OutputSettings(table_format="psql", dcmlfmt="d", floatfmt="g", max_field_width=10)
     results = format_output(
         None,
         [("first field value", "second field value")],
@@ -149,9 +145,7 @@ def test_format_output_truncate_on():
 
 
 def test_format_output_truncate_off():
-    settings = OutputSettings(
-        table_format="psql", dcmlfmt="d", floatfmt="g", max_field_width=None
-    )
+    settings = OutputSettings(table_format="psql", dcmlfmt="d", floatfmt="g", max_field_width=None)
     long_field_value = ("first field " * 100).strip()
     results = format_output(None, [(long_field_value,)], ["head1"], None, settings)
     lines = list(results)
@@ -207,12 +201,8 @@ def test_format_array_output_expanded(ex
 
 
 def test_format_output_auto_expand():
-    settings = OutputSettings(
-        table_format="psql", dcmlfmt="d", floatfmt="g", max_width=100
-    )
-    table_results = format_output(
-        "Title", [("abc", "def")], ["head1", "head2"], "test status", settings
-    )
+    settings = OutputSettings(table_format="psql", dcmlfmt="d", floatfmt="g", max_width=100)
+    table_results = format_output("Title", [("abc", "def")], ["head1", "head2"], "test status", settings)
     table = [
         "Title",
         "+-------+-------+",
@@ -269,18 +259,18 @@ test_ids = [
 def pset_pager_mocks():
     cli = PGCli()
     cli.watch_command = None
-    with mock.patch("pgcli.main.click.echo") as mock_echo, mock.patch(
-        "pgcli.main.click.echo_via_pager"
-    ) as mock_echo_via_pager, mock.patch.object(cli, "prompt_app") as mock_app:
+    with (
+        mock.patch("pgcli.main.click.echo") as mock_echo,
+        mock.patch("pgcli.main.click.echo_via_pager") as mock_echo_via_pager,
+        mock.patch.object(cli, "prompt_app") as mock_app,
+    ):
         yield cli, mock_echo, mock_echo_via_pager, mock_app
 
 
 @pytest.mark.parametrize("term_height,term_width,text", test_data, ids=test_ids)
 def test_pset_pager_off(term_height, term_width, text, pset_pager_mocks):
     cli, mock_echo, mock_echo_via_pager, mock_cli = pset_pager_mocks
-    mock_cli.output.get_size.return_value = termsize(
-        rows=term_height, columns=term_width
-    )
+    mock_cli.output.get_size.return_value = termsize(rows=term_height, columns=term_width)
 
     with mock.patch.object(cli.pgspecial, "pager_config", PAGER_OFF):
         cli.echo_via_pager(text)
@@ -292,9 +282,7 @@ def test_pset_pager_off(term_height, ter
 @pytest.mark.parametrize("term_height,term_width,text", test_data, ids=test_ids)
 def test_pset_pager_always(term_height, term_width, text, pset_pager_mocks):
     cli, mock_echo, mock_echo_via_pager, mock_cli = pset_pager_mocks
-    mock_cli.output.get_size.return_value = termsize(
-        rows=term_height, columns=term_width
-    )
+    mock_cli.output.get_size.return_value = termsize(rows=term_height, columns=term_width)
 
     with mock.patch.object(cli.pgspecial, "pager_config", PAGER_ALWAYS):
         cli.echo_via_pager(text)
@@ -306,14 +294,10 @@ def test_pset_pager_always(term_height,
 pager_on_test_data = [l + (r,) for l, r in zip(test_data, use_pager_when_on)]
 
 
-@pytest.mark.parametrize(
-    "term_height,term_width,text,use_pager", pager_on_test_data, ids=test_ids
-)
+@pytest.mark.parametrize("term_height,term_width,text,use_pager", pager_on_test_data, ids=test_ids)
 def test_pset_pager_on(term_height, term_width, text, use_pager, pset_pager_mocks):
     cli, mock_echo, mock_echo_via_pager, mock_cli = pset_pager_mocks
-    mock_cli.output.get_size.return_value = termsize(
-        rows=term_height, columns=term_width
-    )
+    mock_cli.output.get_size.return_value = termsize(rows=term_height, columns=term_width)
 
     with mock.patch.object(cli.pgspecial, "pager_config", PAGER_LONG_OUTPUT):
         cli.echo_via_pager(text)
@@ -330,15 +314,14 @@ def test_pset_pager_on(term_height, term
     "text,expected_length",
     [
         (
-            "22200K .......\u001b[0m\u001b[91m... .......... ...\u001b[0m\u001b[91m.\u001b[0m\u001b[91m...... .........\u001b[0m\u001b[91m.\u001b[0m\u001b[91m \u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m...... 50% 28.6K 12m55s",
+            "22200K .......\u001b[0m\u001b[91m... .......... ...\u001b[0m\u001b[91m.\u001b[0m\u001b[91m...... .........\u001b[0m\u001b[91m.\u001b[0m\u001b[91m \u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m.\u001b[0m\u001b[91m...... 50% 28.6K 12m55s",  # noqa: E501
             78,
         ),
         ("=\u001b[m=", 2),
         ("-\u001b]23\u0007-", 2),
     ],
 )
-def test_color_pattern(text, expected_length, pset_pager_mocks):
-    cli = pset_pager_mocks[0]
+def test_color_pattern(text, expected_length):
     assert len(COLOR_CODE_REGEX.sub("", text)) == expected_length
 
 
@@ -405,34 +388,24 @@ def test_logfile_unwriteable_file(execut
     cli = PGCli(pgexecute=executor)
     statement = r"\log-file forbidden.log"
     with mock.patch("builtins.open") as mock_open:
-        mock_open.side_effect = PermissionError(
-            "[Errno 13] Permission denied: 'forbidden.log'"
-        )
+        mock_open.side_effect = PermissionError("[Errno 13] Permission denied: 'forbidden.log'")
         result = run(executor, statement, pgspecial=cli.pgspecial)
-    assert result == [
-        "[Errno 13] Permission denied: 'forbidden.log'\nLogfile capture disabled"
-    ]
+    assert result == ["[Errno 13] Permission denied: 'forbidden.log'\nLogfile capture disabled"]
 
 
 @dbtest
 def test_watch_works(executor):
     cli = PGCli(pgexecute=executor)
 
-    def run_with_watch(
-        query, target_call_count=1, expected_output="", expected_timing=None
-    ):
+    def run_with_watch(query, target_call_count=1, expected_output="", expected_timing=None):
         """
         :param query: Input to the CLI
         :param target_call_count: Number of times the user lets the command run before Ctrl-C
         :param expected_output: Substring expected to be found for each executed query
         :param expected_timing: value `time.sleep` expected to be called with on every invocation
         """
-        with mock.patch.object(cli, "echo_via_pager") as mock_echo, mock.patch(
-            "pgcli.main.sleep"
-        ) as mock_sleep:
-            mock_sleep.side_effect = [None] * (target_call_count - 1) + [
-                KeyboardInterrupt
-            ]
+        with mock.patch.object(cli, "echo_via_pager") as mock_echo, mock.patch("pgcli.main.sleep") as mock_sleep:
+            mock_sleep.side_effect = [None] * (target_call_count - 1) + [KeyboardInterrupt]
             cli.handle_watch_command(query)
         # Validate that sleep was called with the right timing
         for i in range(target_call_count - 1):
@@ -446,16 +419,11 @@ def test_watch_works(executor):
     with mock.patch("pgcli.main.click.secho") as mock_secho:
         cli.handle_watch_command(r"\watch 2")
     mock_secho.assert_called()
-    assert (
-        r"\watch cannot be used with an empty query"
-        in mock_secho.call_args_list[0][0][0]
-    )
+    assert r"\watch cannot be used with an empty query" in mock_secho.call_args_list[0][0][0]
 
     # Usage 1: Run a query and then re-run it with \watch across two prompts.
     run_with_watch("SELECT 111", expected_output="111")
-    run_with_watch(
-        "\\watch 10", target_call_count=2, expected_output="111", expected_timing=10
-    )
+    run_with_watch("\\watch 10", target_call_count=2, expected_output="111", expected_timing=10)
 
     # Usage 2: Run a query and \watch via the same prompt.
     run_with_watch(
@@ -466,9 +434,7 @@ def test_watch_works(executor):
     )
 
     # Usage 3: Re-run the last watched command with a new timing
-    run_with_watch(
-        "\\watch 5", target_call_count=4, expected_output="222", expected_timing=5
-    )
+    run_with_watch("\\watch 5", target_call_count=4, expected_output="222", expected_timing=5)
 
 
 def test_missing_rc_dir(tmpdir):
@@ -482,9 +448,7 @@ def test_quoted_db_uri(tmpdir):
     with mock.patch.object(PGCli, "connect") as mock_connect:
         cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile")))
         cli.connect_uri("postgres://bar%5E:%5Dfoo@baz.com/testdb%5B")
-    mock_connect.assert_called_with(
-        database="testdb[", host="baz.com", user="bar^", passwd="]foo"
-    )
+    mock_connect.assert_called_with(database="testdb[", host="baz.com", user="bar^", passwd="]foo")
 
 
 def test_pg_service_file(tmpdir):
@@ -544,8 +508,7 @@ def test_ssl_db_uri(tmpdir):
     with mock.patch.object(PGCli, "connect") as mock_connect:
         cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile")))
         cli.connect_uri(
-            "postgres://bar%5E:%5Dfoo@baz.com/testdb%5B?"
-            "sslmode=verify-full&sslcert=m%79.pem&sslkey=my-key.pem&sslrootcert=c%61.pem"
+            "postgres://bar%5E:%5Dfoo@baz.com/testdb%5B?sslmode=verify-full&sslcert=m%79.pem&sslkey=my-key.pem&sslrootcert=c%61.pem"
         )
     mock_connect.assert_called_with(
         database="testdb[",
@@ -563,17 +526,13 @@ def test_port_db_uri(tmpdir):
     with mock.patch.object(PGCli, "connect") as mock_connect:
         cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile")))
         cli.connect_uri("postgres://bar:foo@baz.com:2543/testdb")
-    mock_connect.assert_called_with(
-        database="testdb", host="baz.com", user="bar", passwd="foo", port="2543"
-    )
+    mock_connect.assert_called_with(database="testdb", host="baz.com", user="bar", passwd="foo", port="2543")
 
 
 def test_multihost_db_uri(tmpdir):
     with mock.patch.object(PGCli, "connect") as mock_connect:
         cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile")))
-        cli.connect_uri(
-            "postgres://bar:foo@baz1.com:2543,baz2.com:2543,baz3.com:2543/testdb"
-        )
+        cli.connect_uri("postgres://bar:foo@baz1.com:2543,baz2.com:2543,baz3.com:2543/testdb")
     mock_connect.assert_called_with(
         database="testdb",
         host="baz1.com,baz2.com,baz3.com",
@@ -588,9 +547,7 @@ def test_application_name_db_uri(tmpdir)
         mock_pgexecute.return_value = None
         cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile")))
         cli.connect_uri("postgres://bar@baz.com/?application_name=cow")
-    mock_pgexecute.assert_called_with(
-        "bar", "bar", "", "baz.com", "", "", notify_callback, application_name="cow"
-    )
+    mock_pgexecute.assert_called_with("bar", "bar", "", "baz.com", "", "", notify_callback, application_name="cow")
 
 
 @pytest.mark.parametrize(
@@ -608,9 +565,11 @@ def test_application_name_db_uri(tmpdir)
         (60, "1 minute"),
         (61, "1 minute 1 second"),
         (123, "2 minutes 3 seconds"),
+        (124.4, "2 minutes 4 seconds"),
         (3600, "1 hour"),
         (7235, "2 hours 35 seconds"),
         (9005, "2 hours 30 minutes 5 seconds"),
+        (9006.7, "2 hours 30 minutes 6 seconds"),
         (86401, "24 hours 1 second"),
     ],
 )
diff -pruN 4.3.0-3/tests/test_naive_completion.py 4.4.0-1/tests/test_naive_completion.py
--- 4.3.0-3/tests/test_naive_completion.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_naive_completion.py	2025-12-24 23:39:20.000000000 +0000
@@ -21,56 +21,38 @@ def complete_event():
 def test_empty_string_completion(completer, complete_event):
     text = ""
     position = 0
-    result = completions_to_set(
-        completer.get_completions(
-            Document(text=text, cursor_position=position), complete_event
-        )
-    )
+    result = completions_to_set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
     assert result == completions_to_set(map(Completion, completer.all_completions))
 
 
 def test_select_keyword_completion(completer, complete_event):
     text = "SEL"
     position = len("SEL")
-    result = completions_to_set(
-        completer.get_completions(
-            Document(text=text, cursor_position=position), complete_event
-        )
-    )
+    result = completions_to_set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
     assert result == completions_to_set([Completion(text="SELECT", start_position=-3)])
 
 
 def test_function_name_completion(completer, complete_event):
     text = "SELECT MA"
     position = len("SELECT MA")
-    result = completions_to_set(
-        completer.get_completions(
-            Document(text=text, cursor_position=position), complete_event
-        )
-    )
-    assert result == completions_to_set(
-        [
-            Completion(text="MATERIALIZED VIEW", start_position=-2),
-            Completion(text="MAX", start_position=-2),
-            Completion(text="MAXEXTENTS", start_position=-2),
-            Completion(text="MAKE_DATE", start_position=-2),
-            Completion(text="MAKE_TIME", start_position=-2),
-            Completion(text="MAKE_TIMESTAMPTZ", start_position=-2),
-            Completion(text="MAKE_INTERVAL", start_position=-2),
-            Completion(text="MASKLEN", start_position=-2),
-            Completion(text="MAKE_TIMESTAMP", start_position=-2),
-        ]
-    )
+    result = completions_to_set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
+    assert result == completions_to_set([
+        Completion(text="MATERIALIZED VIEW", start_position=-2),
+        Completion(text="MAX", start_position=-2),
+        Completion(text="MAXEXTENTS", start_position=-2),
+        Completion(text="MAKE_DATE", start_position=-2),
+        Completion(text="MAKE_TIME", start_position=-2),
+        Completion(text="MAKE_TIMESTAMPTZ", start_position=-2),
+        Completion(text="MAKE_INTERVAL", start_position=-2),
+        Completion(text="MASKLEN", start_position=-2),
+        Completion(text="MAKE_TIMESTAMP", start_position=-2),
+    ])
 
 
 def test_column_name_completion(completer, complete_event):
     text = "SELECT  FROM users"
     position = len("SELECT ")
-    result = completions_to_set(
-        completer.get_completions(
-            Document(text=text, cursor_position=position), complete_event
-        )
-    )
+    result = completions_to_set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
     assert result == completions_to_set(map(Completion, completer.all_completions))
 
 
@@ -84,27 +66,18 @@ def test_alter_well_known_keywords_compl
             smart_completion=True,
         )
     )
-    assert result > completions_to_set(
-        [
-            Completion(text="DATABASE", display_meta="keyword"),
-            Completion(text="TABLE", display_meta="keyword"),
-            Completion(text="SYSTEM", display_meta="keyword"),
-        ]
-    )
-    assert (
-        completions_to_set([Completion(text="CREATE", display_meta="keyword")])
-        not in result
-    )
+    assert result > completions_to_set([
+        Completion(text="DATABASE", display_meta="keyword"),
+        Completion(text="TABLE", display_meta="keyword"),
+        Completion(text="SYSTEM", display_meta="keyword"),
+    ])
+    assert completions_to_set([Completion(text="CREATE", display_meta="keyword")]) not in result
 
 
 def test_special_name_completion(completer, complete_event):
     text = "\\"
     position = len("\\")
-    result = completions_to_set(
-        completer.get_completions(
-            Document(text=text, cursor_position=position), complete_event
-        )
-    )
+    result = completions_to_set(completer.get_completions(Document(text=text, cursor_position=position), complete_event))
     # Special commands will NOT be suggested during naive completion mode.
     assert result == completions_to_set([])
 
@@ -119,15 +92,13 @@ def test_datatype_name_completion(comple
             smart_completion=True,
         )
     )
-    assert result == completions_to_set(
-        [
-            Completion(text="INET", display_meta="datatype"),
-            Completion(text="INT", display_meta="datatype"),
-            Completion(text="INT2", display_meta="datatype"),
-            Completion(text="INT4", display_meta="datatype"),
-            Completion(text="INT8", display_meta="datatype"),
-            Completion(text="INTEGER", display_meta="datatype"),
-            Completion(text="INTERNAL", display_meta="datatype"),
-            Completion(text="INTERVAL", display_meta="datatype"),
-        ]
-    )
+    assert result == completions_to_set([
+        Completion(text="INET", display_meta="datatype"),
+        Completion(text="INT", display_meta="datatype"),
+        Completion(text="INT2", display_meta="datatype"),
+        Completion(text="INT4", display_meta="datatype"),
+        Completion(text="INT8", display_meta="datatype"),
+        Completion(text="INTEGER", display_meta="datatype"),
+        Completion(text="INTERNAL", display_meta="datatype"),
+        Completion(text="INTERVAL", display_meta="datatype"),
+    ])
diff -pruN 4.3.0-3/tests/test_pgcompleter.py 4.4.0-1/tests/test_pgcompleter.py
--- 4.3.0-3/tests/test_pgcompleter.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_pgcompleter.py	2025-12-24 23:39:20.000000000 +0000
@@ -39,9 +39,7 @@ def test_generate_alias_uses_upper_case_
         ("sometable", "s"),
     ],
 )
-def test_generate_alias_uses_first_char_and_every_preceded_by_underscore(
-    table_name, alias
-):
+def test_generate_alias_uses_first_char_and_every_preceded_by_underscore(table_name, alias):
     assert pgcompleter.generate_alias(table_name) == alias
 
 
@@ -49,9 +47,7 @@ def test_generate_alias_uses_first_char_
     "table_name, alias_map, alias",
     [
         ("some_table", {"some_table": "my_alias"}, "my_alias"),
-        pytest.param(
-            "some_other_table", {"some_table": "my_alias"}, "sot", id="no_match_in_map"
-        ),
+        pytest.param("some_other_table", {"some_table": "my_alias"}, "sot", id="no_match_in_map"),
     ],
 )
 def test_generate_alias_can_use_alias_map(table_name, alias_map, alias):
@@ -83,9 +79,7 @@ def test_pgcompleter_alias_uses_configur
         ("SomeTable", {"SomeTable": "my_alias"}, "my_alias"),
     ],
 )
-def test_generate_alias_prefers_alias_over_upper_case_name(
-    table_name, alias_map, alias
-):
+def test_generate_alias_prefers_alias_over_upper_case_name(table_name, alias_map, alias):
     assert pgcompleter.generate_alias(table_name, alias_map) == alias
 
 
diff -pruN 4.3.0-3/tests/test_pgexecute.py 4.4.0-1/tests/test_pgexecute.py
--- 4.3.0-3/tests/test_pgexecute.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_pgexecute.py	2025-12-24 23:39:20.000000000 +0000
@@ -90,8 +90,8 @@ def test_expanded_slash_G(executor, pgsp
     # Tests whether we reset the expanded output after a \G.
     run(executor, """create table test(a boolean)""")
     run(executor, """insert into test values(True)""")
-    results = run(executor, r"""select * from test \G""", pgspecial=pgspecial)
-    assert pgspecial.expanded_output == False
+    run(executor, r"""select * from test \G""", pgspecial=pgspecial)
+    assert pgspecial.expanded_output is False
 
 
 @dbtest
@@ -132,9 +132,7 @@ def test_schemata_table_views_and_column
     # views
     assert set(executor.views()) >= {("public", "d")}
 
-    assert set(executor.view_columns()) >= {
-        ("public", "d", "e", "integer", False, None)
-    }
+    assert set(executor.view_columns()) >= {("public", "d", "e", "integer", False, None)}
 
 
 @dbtest
@@ -147,9 +145,7 @@ def test_foreign_key_query(executor):
         "create table schema2.child(childid int PRIMARY KEY, motherid int REFERENCES schema1.parent)",
     )
 
-    assert set(executor.foreignkeys()) >= {
-        ("schema1", "parent", "parentid", "schema2", "child", "motherid")
-    }
+    assert set(executor.foreignkeys()) >= {("schema1", "parent", "parentid", "schema2", "child", "motherid")}
 
 
 @dbtest
@@ -198,9 +194,7 @@ def test_functions_query(executor):
             return_type="integer",
             is_set_returning=True,
         ),
-        function_meta_data(
-            schema_name="schema1", func_name="func2", return_type="integer"
-        ),
+        function_meta_data(schema_name="schema1", func_name="func2", return_type="integer"),
     }
 
 
@@ -251,9 +245,7 @@ Routine: scanner_yyerror
 
 @dbtest
 def test_invalid_column_name(executor, exception_formatter):
-    result = run(
-        executor, "select invalid command", exception_formatter=exception_formatter
-    )
+    result = run(executor, "select invalid command", exception_formatter=exception_formatter)
     assert 'column "invalid" does not exist' in result[0]
 
 
@@ -268,9 +260,7 @@ def test_unicode_support_in_output(execu
     run(executor, "insert into unicodechars (t) values ('é')")
 
     # See issue #24, this raises an exception without proper handling
-    assert "é" in run(
-        executor, "select * from unicodechars", join=True, expanded=expanded
-    )
+    assert "é" in run(executor, "select * from unicodechars", join=True, expanded=expanded)
 
 
 @dbtest
@@ -279,8 +269,8 @@ def test_not_is_special(executor, pgspec
     query = "select 1"
     result = list(executor.run(query, pgspecial=pgspecial))
     success, is_special = result[0][5:]
-    assert success == True
-    assert is_special == False
+    assert success is True
+    assert is_special is False
 
 
 @dbtest
@@ -289,8 +279,8 @@ def test_execute_from_file_no_arg(execut
     result = list(executor.run(r"\i", pgspecial=pgspecial))
     status, sql, success, is_special = result[0][3:]
     assert "missing required argument" in status
-    assert success == False
-    assert is_special == True
+    assert success is False
+    assert is_special is True
 
 
 @dbtest
@@ -304,14 +294,12 @@ def test_execute_from_file_io_error(os,
     result = list(executor.run(r"\i test", pgspecial=pgspecial))
     status, sql, success, is_special = result[0][3:]
     assert status == "test"
-    assert success == False
-    assert is_special == True
+    assert success is False
+    assert is_special is True
 
 
 @dbtest
-def test_execute_from_commented_file_that_executes_another_file(
-    executor, pgspecial, tmpdir
-):
+def test_execute_from_commented_file_that_executes_another_file(executor, pgspecial, tmpdir):
     # https://github.com/dbcli/pgcli/issues/1336
     sqlfile1 = tmpdir.join("test01.sql")
     sqlfile1.write("-- asdf \n\\h")
@@ -321,10 +309,10 @@ def test_execute_from_commented_file_tha
     rcfile = str(tmpdir.join("rcfile"))
     print(rcfile)
     cli = PGCli(pgexecute=executor, pgclirc_file=rcfile)
-    assert cli != None
+    assert cli is not None
     statement = "--comment\n\\h"
     result = run(executor, statement, pgspecial=cli.pgspecial)
-    assert result != None
+    assert result is not None
     assert result[0].find("ALTER TABLE")
 
 
@@ -333,38 +321,38 @@ def test_execute_commented_first_line_an
     # just some base cases that should work also
     statement = "--comment\nselect now();"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("now") >= 0
 
     statement = "/*comment*/\nselect now();"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("now") >= 0
 
     # https://github.com/dbcli/pgcli/issues/1362
     statement = "--comment\n\\h"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
     statement = "--comment1\n--comment2\n\\h"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
-    statement = "/*comment*/\n\h;"
+    statement = "/*comment*/\n\\h;"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
-    statement = """/*comment1
+    statement = r"""/*comment1
     comment2*/
     \h"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
@@ -374,43 +362,43 @@ def test_execute_commented_first_line_an
     comment4*/
     \\h"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
-    statement = "    /*comment*/\n\h;"
+    statement = "    /*comment*/\n\\h;"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
-    statement = "/*comment\ncomment line2*/\n\h;"
+    statement = "/*comment\ncomment line2*/\n\\h;"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
-    statement = "          /*comment\ncomment line2*/\n\h;"
+    statement = "          /*comment\ncomment line2*/\n\\h;"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("ALTER") >= 0
     assert result[1].find("ABORT") >= 0
 
     statement = """\\h /*comment4 */"""
     result = run(executor, statement, pgspecial=pgspecial)
     print(result)
-    assert result != None
+    assert result is not None
     assert result[0].find("No help") >= 0
 
     # TODO: we probably don't want to do this but sqlparse is not parsing things well
     # we relly want it to find help but right now, sqlparse isn't dropping the /*comment*/
     # style comments after command
 
-    statement = """/*comment1*/
+    statement = r"""/*comment1*/
     \h
     /*comment4 */"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[0].find("No help") >= 0
 
     # TODO: same for this one
@@ -422,7 +410,7 @@ def test_execute_commented_first_line_an
     comment5
     comment6*/"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[0].find("No help") >= 0
 
 
@@ -433,12 +421,12 @@ def test_execute_commented_first_line_an
     # just some base cases that should work also
     statement = "--comment\nselect now();"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("now") >= 0
 
     statement = "/*comment*/\nselect now();"
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[1].find("now") >= 0
 
     # this simulates the original error (1403) without having to add/drop tables
@@ -448,26 +436,26 @@ def test_execute_commented_first_line_an
     # test that the statement works
     statement = """VALUES (1, 'one'), (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # test the statement with a \n in the middle
     statement = """VALUES (1, 'one'),\n (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # test the statement with a newline in the middle
     statement = """VALUES (1, 'one'),
      (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # now add a single comment line
     statement = """--comment\nVALUES (1, 'one'), (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # doing without special char \n
@@ -475,13 +463,13 @@ def test_execute_commented_first_line_an
     VALUES (1,'one'),
     (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # two comment lines
     statement = """--comment\n--comment2\nVALUES (1,'one'), (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # doing without special char \n
@@ -490,7 +478,7 @@ def test_execute_commented_first_line_an
     VALUES (1,'one'), (2, 'two'), (3, 'three');
     """
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # multiline comment + newline in middle of the statement
@@ -500,7 +488,7 @@ comment3*/
 VALUES (1,'one'),
 (2, 'two'), (3, 'three');"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
     # multiline comment + newline in middle of the statement
@@ -513,7 +501,7 @@ VALUES (1,'one'),
 --comment4
 --comment5"""
     result = run(executor, statement, pgspecial=pgspecial)
-    assert result != None
+    assert result is not None
     assert result[5].find("three") >= 0
 
 
@@ -582,9 +570,7 @@ def test_unicode_support_in_enum_type(ex
 def test_json_renders_without_u_prefix(executor, expanded):
     run(executor, "create table jsontest(d json)")
     run(executor, """insert into jsontest (d) values ('{"name": "Éowyn"}')""")
-    result = run(
-        executor, "SELECT d FROM jsontest LIMIT 1", join=True, expanded=expanded
-    )
+    result = run(executor, "SELECT d FROM jsontest LIMIT 1", join=True, expanded=expanded)
 
     assert '{"name": "Éowyn"}' in result
 
@@ -593,9 +579,7 @@ def test_json_renders_without_u_prefix(e
 def test_jsonb_renders_without_u_prefix(executor, expanded):
     run(executor, "create table jsonbtest(d jsonb)")
     run(executor, """insert into jsonbtest (d) values ('{"name": "Éowyn"}')""")
-    result = run(
-        executor, "SELECT d FROM jsonbtest LIMIT 1", join=True, expanded=expanded
-    )
+    result = run(executor, "SELECT d FROM jsonbtest LIMIT 1", join=True, expanded=expanded)
 
     assert '{"name": "Éowyn"}' in result
 
@@ -603,28 +587,10 @@ def test_jsonb_renders_without_u_prefix(
 @dbtest
 def test_date_time_types(executor):
     run(executor, "SET TIME ZONE UTC")
-    assert (
-        run(executor, "SELECT (CAST('00:00:00' AS time))", join=True).split("\n")[3]
-        == "| 00:00:00 |"
-    )
-    assert (
-        run(executor, "SELECT (CAST('00:00:00+14:59' AS timetz))", join=True).split(
-            "\n"
-        )[3]
-        == "| 00:00:00+14:59 |"
-    )
-    assert (
-        run(executor, "SELECT (CAST('4713-01-01 BC' AS date))", join=True).split("\n")[
-            3
-        ]
-        == "| 4713-01-01 BC |"
-    )
-    assert (
-        run(
-            executor, "SELECT (CAST('4713-01-01 00:00:00 BC' AS timestamp))", join=True
-        ).split("\n")[3]
-        == "| 4713-01-01 00:00:00 BC |"
-    )
+    assert run(executor, "SELECT (CAST('00:00:00' AS time))", join=True).split("\n")[3] == "| 00:00:00 |"
+    assert run(executor, "SELECT (CAST('00:00:00+14:59' AS timetz))", join=True).split("\n")[3] == "| 00:00:00+14:59 |"
+    assert run(executor, "SELECT (CAST('4713-01-01 BC' AS date))", join=True).split("\n")[3] == "| 4713-01-01 BC |"
+    assert run(executor, "SELECT (CAST('4713-01-01 00:00:00 BC' AS timestamp))", join=True).split("\n")[3] == "| 4713-01-01 00:00:00 BC |"
     assert (
         run(
             executor,
@@ -634,10 +600,7 @@ def test_date_time_types(executor):
         == "| 4713-01-01 00:00:00+00 BC |"
     )
     assert (
-        run(
-            executor, "SELECT (CAST('-123456789 days 12:23:56' AS interval))", join=True
-        ).split("\n")[3]
-        == "| -123456789 days, 12:23:56 |"
+        run(executor, "SELECT (CAST('-123456789 days 12:23:56' AS interval))", join=True).split("\n")[3] == "| -123456789 days, 12:23:56 |"
     )
 
 
@@ -670,20 +633,14 @@ def test_raises_with_no_formatter(execut
 @dbtest
 def test_on_error_resume(executor, exception_formatter):
     sql = "select 1; error; select 1;"
-    result = list(
-        executor.run(sql, on_error_resume=True, exception_formatter=exception_formatter)
-    )
+    result = list(executor.run(sql, on_error_resume=True, exception_formatter=exception_formatter))
     assert len(result) == 3
 
 
 @dbtest
 def test_on_error_stop(executor, exception_formatter):
     sql = "select 1; error; select 1;"
-    result = list(
-        executor.run(
-            sql, on_error_resume=False, exception_formatter=exception_formatter
-        )
-    )
+    result = list(executor.run(sql, on_error_resume=False, exception_formatter=exception_formatter))
     assert len(result) == 2
 
 
@@ -697,7 +654,7 @@ def test_on_error_stop(executor, excepti
 @dbtest
 def test_nonexistent_function_definition(executor):
     with pytest.raises(RuntimeError):
-        result = executor.view_definition("there_is_no_such_function")
+        executor.view_definition("there_is_no_such_function")
 
 
 @dbtest
@@ -713,7 +670,7 @@ def test_function_definition(executor):
             $function$
     """,
     )
-    result = executor.function_definition("the_number_three")
+    executor.function_definition("the_number_three")
 
 
 @dbtest
@@ -764,9 +721,9 @@ def test_view_definition(executor):
 @dbtest
 def test_nonexistent_view_definition(executor):
     with pytest.raises(RuntimeError):
-        result = executor.view_definition("there_is_no_such_view")
+        executor.view_definition("there_is_no_such_view")
     with pytest.raises(RuntimeError):
-        result = executor.view_definition("mvw1")
+        executor.view_definition("mvw1")
 
 
 @dbtest
@@ -775,9 +732,7 @@ def test_short_host(executor):
         assert executor.short_host == "localhost"
     with patch.object(executor, "host", "localhost.example.org"):
         assert executor.short_host == "localhost"
-    with patch.object(
-        executor, "host", "localhost1.example.org,localhost2.example.org"
-    ):
+    with patch.object(executor, "host", "localhost1.example.org,localhost2.example.org"):
         assert executor.short_host == "localhost1"
     with patch.object(executor, "host", "ec2-11-222-333-444.compute-1.amazonaws.com"):
         assert executor.short_host == "ec2-11-222-333-444"
@@ -814,9 +769,7 @@ def test_exit_without_active_connection(
         aliases=(":q",),
     )
 
-    with patch.object(
-        executor.conn, "cursor", side_effect=psycopg.InterfaceError("I'm broken!")
-    ):
+    with patch.object(executor.conn, "cursor", side_effect=psycopg.InterfaceError("I'm broken!")):
         # we should be able to quit the app, even without active connection
         run(executor, "\\q", pgspecial=pgspecial)
         quit_handler.assert_called_once()
diff -pruN 4.3.0-3/tests/test_smart_completion_multiple_schemata.py 4.4.0-1/tests/test_smart_completion_multiple_schemata.py
--- 4.3.0-3/tests/test_smart_completion_multiple_schemata.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_smart_completion_multiple_schemata.py	2025-12-24 23:39:20.000000000 +0000
@@ -11,7 +11,6 @@ from metadata import (
     wildcard_expansion,
     column,
     get_result,
-    result_set,
     qual,
     no_qual,
     parametrize,
@@ -125,9 +124,7 @@ completers = testdata.get_completers(cas
 @parametrize("table", ["users", '"users"'])
 def test_suggested_column_names_from_shadowed_visible_table(completer, table):
     result = get_result(completer, "SELECT  FROM " + table, len("SELECT "))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("users")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("users"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False, qualify=no_qual))
@@ -140,18 +137,14 @@ def test_suggested_column_names_from_sha
 )
 def test_suggested_column_names_from_qualified_shadowed_table(completer, text):
     result = get_result(completer, text, position=text.find("  ") + 1)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("users", "custom")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("users", "custom"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False, qualify=no_qual))
 @parametrize("text", ["WITH users as (SELECT 1 AS foo) SELECT  from users"])
 def test_suggested_column_names_from_cte(completer, text):
     result = completions_to_set(get_result(completer, text, text.find("  ") + 1))
-    assert result == completions_to_set(
-        [column("foo")] + testdata.functions_and_keywords()
-    )
+    assert result == completions_to_set([column("foo")] + testdata.functions_and_keywords())
 
 
 @parametrize("completer", completers(casing=False))
@@ -166,14 +159,12 @@ def test_suggested_column_names_from_cte
 )
 def test_suggested_join_conditions(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            alias("users"),
-            alias("shipments"),
-            name_join("shipments.id = users.id"),
-            fk_join("shipments.user_id = users.id"),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        alias("users"),
+        alias("shipments"),
+        name_join("shipments.id = users.id"),
+        fk_join("shipments.user_id = users.id"),
+    ])
 
 
 @parametrize("completer", completers(filtr=True, casing=False, aliasing=False))
@@ -192,17 +183,14 @@ def test_suggested_join_conditions(compl
 def test_suggested_joins(completer, query, tbl):
     result = get_result(completer, query.format(tbl))
     assert completions_to_set(result) == completions_to_set(
-        testdata.schemas_and_from_clause_items()
-        + [join(f"custom.shipments ON shipments.user_id = {tbl}.id")]
+        testdata.schemas_and_from_clause_items() + [join(f"custom.shipments ON shipments.user_id = {tbl}.id")]
     )
 
 
 @parametrize("completer", completers(filtr=True, casing=False, qualify=no_qual))
 def test_suggested_column_names_from_schema_qualifed_table(completer):
     result = get_result(completer, "SELECT  from custom.products", len("SELECT "))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("products", "custom")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("products", "custom"))
 
 
 @parametrize(
@@ -216,19 +204,13 @@ def test_suggested_column_names_from_sch
 )
 @parametrize("completer", completers(filtr=True, casing=False))
 def test_suggested_columns_with_insert(completer, text):
-    assert completions_to_set(get_result(completer, text)) == completions_to_set(
-        testdata.columns("orders")
-    )
+    assert completions_to_set(get_result(completer, text)) == completions_to_set(testdata.columns("orders"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False, qualify=no_qual))
 def test_suggested_column_names_in_function(completer):
-    result = get_result(
-        completer, "SELECT MAX( from custom.products", len("SELECT MAX(")
-    )
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("products", "custom")
-    )
+    result = get_result(completer, "SELECT MAX( from custom.products", len("SELECT MAX("))
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("products", "custom"))
 
 
 @parametrize("completer", completers(casing=False, aliasing=False))
@@ -237,9 +219,7 @@ def test_suggested_column_names_in_funct
     ["SELECT * FROM Custom.", "SELECT * FROM custom.", 'SELECT * FROM "custom".'],
 )
 @parametrize("use_leading_double_quote", [False, True])
-def test_suggested_table_names_with_schema_dot(
-    completer, text, use_leading_double_quote
-):
+def test_suggested_table_names_with_schema_dot(completer, text, use_leading_double_quote):
     if use_leading_double_quote:
         text += '"'
         start_position = -1
@@ -247,17 +227,13 @@ def test_suggested_table_names_with_sche
         start_position = 0
 
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.from_clause_items("custom", start_position)
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.from_clause_items("custom", start_position))
 
 
 @parametrize("completer", completers(casing=False, aliasing=False))
 @parametrize("text", ['SELECT * FROM "Custom".'])
 @parametrize("use_leading_double_quote", [False, True])
-def test_suggested_table_names_with_schema_dot2(
-    completer, text, use_leading_double_quote
-):
+def test_suggested_table_names_with_schema_dot2(completer, text, use_leading_double_quote):
     if use_leading_double_quote:
         text += '"'
         start_position = -1
@@ -265,37 +241,25 @@ def test_suggested_table_names_with_sche
         start_position = 0
 
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.from_clause_items("Custom", start_position)
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.from_clause_items("Custom", start_position))
 
 
 @parametrize("completer", completers(filtr=True, casing=False))
 def test_suggested_column_names_with_qualified_alias(completer):
     result = get_result(completer, "SELECT p. from custom.products p", len("SELECT p."))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("products", "custom")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns("products", "custom"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False, qualify=no_qual))
 def test_suggested_multiple_column_names(completer):
-    result = get_result(
-        completer, "SELECT id,  from custom.products", len("SELECT id, ")
-    )
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("products", "custom")
-    )
+    result = get_result(completer, "SELECT id,  from custom.products", len("SELECT id, "))
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("products", "custom"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False))
 def test_suggested_multiple_column_names_with_alias(completer):
-    result = get_result(
-        completer, "SELECT p.id, p. from custom.products p", len("SELECT u.id, u.")
-    )
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("products", "custom")
-    )
+    result = get_result(completer, "SELECT p.id, p. from custom.products p", len("SELECT u.id, u."))
+    assert completions_to_set(result) == completions_to_set(testdata.columns("products", "custom"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False))
@@ -307,19 +271,15 @@ def test_suggested_multiple_column_names
     ],
 )
 def test_suggestions_after_on(completer, text):
-    position = len(
-        "SELECT x.id, y.product_name FROM custom.products x JOIN custom.products y ON "
-    )
+    position = len("SELECT x.id, y.product_name FROM custom.products x JOIN custom.products y ON ")
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            alias("x"),
-            alias("y"),
-            name_join("y.price = x.price"),
-            name_join("y.product_name = x.product_name"),
-            name_join("y.id = x.id"),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        alias("x"),
+        alias("y"),
+        name_join("y.price = x.price"),
+        name_join("y.product_name = x.product_name"),
+        name_join("y.id = x.id"),
+    ])
 
 
 @parametrize("completer", completers())
@@ -333,32 +293,26 @@ def test_suggested_aliases_after_on_righ
 def test_table_names_after_from(completer):
     text = "SELECT * FROM "
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.schemas_and_from_clause_items()
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.schemas_and_from_clause_items())
 
 
 @parametrize("completer", completers(filtr=True, casing=False))
 def test_schema_qualified_function_name(completer):
     text = "SELECT custom.func"
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            function("func3()", -len("func")),
-            function("set_returning_func()", -len("func")),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        function("func3()", -len("func")),
+        function("set_returning_func()", -len("func")),
+    ])
 
 
 @parametrize("completer", completers(filtr=True, casing=False, aliasing=False))
 def test_schema_qualified_function_name_after_from(completer):
     text = "SELECT * FROM custom.set_r"
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            function("set_returning_func()", -len("func")),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        function("set_returning_func()", -len("func")),
+    ])
 
 
 @parametrize("completer", completers(filtr=True, casing=False, aliasing=False))
@@ -373,11 +327,9 @@ def test_unqualified_function_name_in_se
     completer.search_path = ["public", "custom"]
     text = "SELECT * FROM set_r"
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            function("set_returning_func()", -len("func")),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        function("set_returning_func()", -len("func")),
+    ])
 
 
 @parametrize("completer", completers(filtr=True, casing=False))
@@ -397,12 +349,8 @@ def test_schema_qualified_type_name(comp
 
 @parametrize("completer", completers(filtr=True, casing=False))
 def test_suggest_columns_from_aliased_set_returning_function(completer):
-    result = get_result(
-        completer, "select f. from custom.set_returning_func() f", len("select f.")
-    )
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("set_returning_func", "custom", "functions")
-    )
+    result = get_result(completer, "select f. from custom.set_returning_func() f", len("select f."))
+    assert completions_to_set(result) == completions_to_set(testdata.columns("set_returning_func", "custom", "functions"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False, qualify=no_qual))
@@ -499,10 +447,7 @@ def test_wildcard_column_expansion_with_
 
     completions = get_result(completer, text, position)
 
-    cols = (
-        '"select".id, "select"."localtime", "select"."ABC", '
-        "users.id, users.phone_number"
-    )
+    cols = '"select".id, "select"."localtime", "select"."ABC", users.id, users.phone_number'
     expected = [wildcard_expansion(cols)]
     assert completions == expected
 
@@ -535,21 +480,15 @@ def test_wildcard_column_expansion_with_
 def test_suggest_columns_from_unquoted_table(completer, text):
     position = len("SELECT U.")
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("users", "custom")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns("users", "custom"))
 
 
 @parametrize("completer", completers(filtr=True, casing=False))
-@parametrize(
-    "text", ['SELECT U. FROM custom."Users" U', 'SELECT U. FROM "custom"."Users" U']
-)
+@parametrize("text", ['SELECT U. FROM custom."Users" U', 'SELECT U. FROM "custom"."Users" U'])
 def test_suggest_columns_from_quoted_table(completer, text):
     position = len("SELECT U.")
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("Users", "custom")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns("Users", "custom"))
 
 
 texts = ["SELECT * FROM ", "SELECT * FROM public.Orders O CROSS JOIN "]
@@ -559,9 +498,7 @@ texts = ["SELECT * FROM ", "SELECT * FRO
 @parametrize("text", texts)
 def test_schema_or_visible_table_completion(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.schemas_and_from_clause_items()
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.schemas_and_from_clause_items())
 
 
 @parametrize("completer", completers(aliasing=True, casing=False, filtr=True))
@@ -703,9 +640,7 @@ def test_column_alias_search(completer):
 
 @parametrize("completer", completers(casing=True))
 def test_column_alias_search_qualified(completer):
-    result = get_result(
-        completer, "SELECT E.ei FROM blog.Entries E", len("SELECT E.ei")
-    )
+    result = get_result(completer, "SELECT E.ei FROM blog.Entries E", len("SELECT E.ei"))
     cols = ("EntryID", "EntryTitle")
     assert result[:3] == [column(c, -2) for c in cols]
 
@@ -713,9 +648,7 @@ def test_column_alias_search_qualified(c
 @parametrize("completer", completers(casing=False, filtr=False, aliasing=False))
 def test_schema_object_order(completer):
     result = get_result(completer, "SELECT * FROM u")
-    assert result[:3] == [
-        table(t, pos=-1) for t in ("users", 'custom."Users"', "custom.users")
-    ]
+    assert result[:3] == [table(t, pos=-1) for t in ("users", 'custom."Users"', "custom.users")]
 
 
 @parametrize("completer", completers(casing=False, filtr=False, aliasing=False))
@@ -723,8 +656,7 @@ def test_all_schema_objects(completer):
     text = "SELECT * FROM "
     result = get_result(completer, text)
     assert completions_to_set(result) >= completions_to_set(
-        [table(x) for x in ("orders", '"select"', "custom.shipments")]
-        + [function(x + "()") for x in ("func2",)]
+        [table(x) for x in ("orders", '"select"', "custom.shipments")] + [function(x + "()") for x in ("func2",)]
     )
 
 
@@ -733,8 +665,7 @@ def test_all_schema_objects_with_casing(
     text = "SELECT * FROM "
     result = get_result(completer, text)
     assert completions_to_set(result) >= completions_to_set(
-        [table(x) for x in ("Orders", '"select"', "CUSTOM.shipments")]
-        + [function(x + "()") for x in ("func2",)]
+        [table(x) for x in ("Orders", '"select"', "CUSTOM.shipments")] + [function(x + "()") for x in ("func2",)]
     )
 
 
@@ -743,8 +674,7 @@ def test_all_schema_objects_with_aliases
     text = "SELECT * FROM "
     result = get_result(completer, text)
     assert completions_to_set(result) >= completions_to_set(
-        [table(x) for x in ("orders o", '"select" s', "custom.shipments s")]
-        + [function(x) for x in ("func2() f",)]
+        [table(x) for x in ("orders o", '"select" s', "custom.shipments s")] + [function(x) for x in ("func2() f",)]
     )
 
 
@@ -752,6 +682,4 @@ def test_all_schema_objects_with_aliases
 def test_set_schema(completer):
     text = "SET SCHEMA "
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [schema("'blog'"), schema("'Custom'"), schema("'custom'"), schema("'public'")]
-    )
+    assert completions_to_set(result) == completions_to_set([schema("'blog'"), schema("'Custom'"), schema("'custom'"), schema("'public'")])
diff -pruN 4.3.0-3/tests/test_smart_completion_public_schema_only.py 4.4.0-1/tests/test_smart_completion_public_schema_only.py
--- 4.3.0-3/tests/test_smart_completion_public_schema_only.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_smart_completion_public_schema_only.py	2025-12-24 23:39:20.000000000 +0000
@@ -12,7 +12,6 @@ from metadata import (
     column,
     wildcard_expansion,
     get_result,
-    result_set,
     qual,
     no_qual,
     parametrize,
@@ -68,19 +67,11 @@ cased_func_names = [
 ]
 cased_tbls = ["Users", "Orders"]
 cased_views = ["User_Emails", "Functions"]
-casing = (
-    ["SELECT", "PUBLIC"]
-    + cased_func_names
-    + cased_tbls
-    + cased_views
-    + cased_users_col_names
-    + cased_users2_col_names
-)
+casing = ["SELECT", "PUBLIC"] + cased_func_names + cased_tbls + cased_views + cased_users_col_names + cased_users2_col_names
 # Lists for use in assertions
-cased_funcs = [
-    function(f)
-    for f in ("Custom_Fun()", "_custom_fun()", "Custom_Func1()", "custom_func2()")
-] + [function("set_returning_func(x := , y := )", display="set_returning_func(x, y)")]
+cased_funcs = [function(f) for f in ("Custom_Fun()", "_custom_fun()", "Custom_Func1()", "custom_func2()")] + [
+    function("set_returning_func(x := , y := )", display="set_returning_func(x, y)")
+]
 cased_tbls = [table(t) for t in (cased_tbls + ['"Users"', '"select"'])]
 cased_rels = [view(t) for t in cased_views] + cased_funcs + cased_tbls
 cased_users_cols = [column(c) for c in cased_users_col_names]
@@ -132,25 +123,19 @@ def test_function_column_name(completer)
         len("SELECT * FROM Functions WHERE function:"),
         len("SELECT * FROM Functions WHERE function:text") + 1,
     ):
-        assert [] == get_result(
-            completer, "SELECT * FROM Functions WHERE function:text"[:l]
-        )
+        assert [] == get_result(completer, "SELECT * FROM Functions WHERE function:text"[:l])
 
 
 @parametrize("action", ["ALTER", "DROP", "CREATE", "CREATE OR REPLACE"])
 @parametrize("completer", completers())
 def test_drop_alter_function(completer, action):
-    assert get_result(completer, action + " FUNCTION set_ret") == [
-        function("set_returning_func(x integer, y integer)", -len("set_ret"))
-    ]
+    assert get_result(completer, action + " FUNCTION set_ret") == [function("set_returning_func(x integer, y integer)", -len("set_ret"))]
 
 
 @parametrize("completer", completers())
 def test_empty_string_completion(completer):
     result = get_result(completer, "")
-    assert completions_to_set(
-        testdata.keywords() + testdata.specials()
-    ) == completions_to_set(result)
+    assert completions_to_set(testdata.keywords() + testdata.specials()) == completions_to_set(result)
 
 
 @parametrize("completer", completers())
@@ -162,19 +147,17 @@ def test_select_keyword_completion(compl
 @parametrize("completer", completers())
 def test_builtin_function_name_completion(completer):
     result = get_result(completer, "SELECT MA")
-    assert completions_to_set(result) == completions_to_set(
-        [
-            function("MAKE_DATE", -2),
-            function("MAKE_INTERVAL", -2),
-            function("MAKE_TIME", -2),
-            function("MAKE_TIMESTAMP", -2),
-            function("MAKE_TIMESTAMPTZ", -2),
-            function("MASKLEN", -2),
-            function("MAX", -2),
-            keyword("MAXEXTENTS", -2),
-            keyword("MATERIALIZED VIEW", -2),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        function("MAKE_DATE", -2),
+        function("MAKE_INTERVAL", -2),
+        function("MAKE_TIME", -2),
+        function("MAKE_TIMESTAMP", -2),
+        function("MAKE_TIMESTAMPTZ", -2),
+        function("MASKLEN", -2),
+        function("MAX", -2),
+        keyword("MAXEXTENTS", -2),
+        keyword("MATERIALIZED VIEW", -2),
+    ])
 
 
 @parametrize("completer", completers())
@@ -189,58 +172,47 @@ def test_builtin_function_matches_only_a
 @parametrize("completer", completers(casing=False, aliasing=False))
 def test_user_function_name_completion(completer):
     result = get_result(completer, "SELECT cu")
-    assert completions_to_set(result) == completions_to_set(
-        [
-            function("custom_fun()", -2),
-            function("_custom_fun()", -2),
-            function("custom_func1()", -2),
-            function("custom_func2()", -2),
-            function("CURRENT_DATE", -2),
-            function("CURRENT_TIMESTAMP", -2),
-            function("CUME_DIST", -2),
-            function("CURRENT_TIME", -2),
-            keyword("CURRENT", -2),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        function("custom_fun()", -2),
+        function("_custom_fun()", -2),
+        function("custom_func1()", -2),
+        function("custom_func2()", -2),
+        function("CURRENT_DATE", -2),
+        function("CURRENT_TIMESTAMP", -2),
+        function("CUME_DIST", -2),
+        function("CURRENT_TIME", -2),
+        keyword("CURRENT", -2),
+    ])
 
 
 @parametrize("completer", completers(casing=False, aliasing=False))
 def test_user_function_name_completion_matches_anywhere(completer):
     result = get_result(completer, "SELECT om")
-    assert completions_to_set(result) == completions_to_set(
-        [
-            function("custom_fun()", -2),
-            function("_custom_fun()", -2),
-            function("custom_func1()", -2),
-            function("custom_func2()", -2),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        function("custom_fun()", -2),
+        function("_custom_fun()", -2),
+        function("custom_func1()", -2),
+        function("custom_func2()", -2),
+    ])
 
 
 @parametrize("completer", completers(casing=True))
 def test_list_functions_for_special(completer):
     result = get_result(completer, r"\df ")
-    assert completions_to_set(result) == completions_to_set(
-        [schema("PUBLIC")] + [function(f) for f in cased_func_names]
-    )
+    assert completions_to_set(result) == completions_to_set([schema("PUBLIC")] + [function(f) for f in cased_func_names])
 
 
 @parametrize("completer", completers(casing=False, qualify=no_qual))
 def test_suggested_column_names_from_visible_table(completer):
     result = get_result(completer, "SELECT  from users", len("SELECT "))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("users")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("users"))
 
 
 @parametrize("completer", completers(casing=True, qualify=no_qual))
 def test_suggested_cased_column_names(completer):
     result = get_result(completer, "SELECT  from users", len("SELECT "))
     assert completions_to_set(result) == completions_to_set(
-        cased_funcs
-        + cased_users_cols
-        + testdata.builtin_functions()
-        + testdata.keywords()
+        cased_funcs + cased_users_cols + testdata.builtin_functions() + testdata.keywords()
     )
 
 
@@ -250,9 +222,7 @@ def test_suggested_auto_qualified_column
     position = text.index("  ") + 1
     cols = [column(c.lower()) for c in cased_users_col_names]
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        cols + testdata.functions_and_keywords()
-    )
+    assert completions_to_set(result) == completions_to_set(cols + testdata.functions_and_keywords())
 
 
 @parametrize("completer", completers(casing=False, qualify=qual))
@@ -268,9 +238,7 @@ def test_suggested_auto_qualified_column
     cols = [column("U." + c.lower()) for c in cased_users_col_names]
     cols += [column('"Users".' + c.lower()) for c in cased_users2_col_names]
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        cols + testdata.functions_and_keywords()
-    )
+    assert completions_to_set(result) == completions_to_set(cols + testdata.functions_and_keywords())
 
 
 @parametrize("completer", completers(casing=True, qualify=["always"]))
@@ -287,17 +255,13 @@ def test_suggested_cased_always_qualifie
     position = len("SELECT ")
     cols = [column("users." + c) for c in cased_users_col_names]
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        cased_funcs + cols + testdata.builtin_functions() + testdata.keywords()
-    )
+    assert completions_to_set(result) == completions_to_set(cased_funcs + cols + testdata.builtin_functions() + testdata.keywords())
 
 
 @parametrize("completer", completers(casing=False, qualify=no_qual))
 def test_suggested_column_names_in_function(completer):
     result = get_result(completer, "SELECT MAX( from users", len("SELECT MAX("))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("users")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("users"))
 
 
 @parametrize("completer", completers(casing=False))
@@ -315,24 +279,18 @@ def test_suggested_column_names_with_ali
 @parametrize("completer", completers(casing=False, qualify=no_qual))
 def test_suggested_multiple_column_names(completer):
     result = get_result(completer, "SELECT id,  from users u", len("SELECT id, "))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("users")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("users"))
 
 
 @parametrize("completer", completers(casing=False))
 def test_suggested_multiple_column_names_with_alias(completer):
-    result = get_result(
-        completer, "SELECT u.id, u. from users u", len("SELECT u.id, u.")
-    )
+    result = get_result(completer, "SELECT u.id, u. from users u", len("SELECT u.id, u."))
     assert completions_to_set(result) == completions_to_set(testdata.columns("users"))
 
 
 @parametrize("completer", completers(casing=True))
 def test_suggested_cased_column_names_with_alias(completer):
-    result = get_result(
-        completer, "SELECT u.id, u. from users u", len("SELECT u.id, u.")
-    )
+    result = get_result(completer, "SELECT u.id, u. from users u", len("SELECT u.id, u."))
     assert completions_to_set(result) == completions_to_set(cased_users_cols)
 
 
@@ -378,18 +336,14 @@ join_condition_texts = [
 @parametrize("text", join_condition_texts)
 def test_suggested_join_conditions(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [alias("U"), alias("U2"), fk_join("U2.userid = U.id")]
-    )
+    assert completions_to_set(result) == completions_to_set([alias("U"), alias("U2"), fk_join("U2.userid = U.id")])
 
 
 @parametrize("completer", completers(casing=True))
 @parametrize("text", join_condition_texts)
 def test_cased_join_conditions(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [alias("U"), alias("U2"), fk_join("U2.UserID = U.ID")]
-    )
+    assert completions_to_set(result) == completions_to_set([alias("U"), alias("U2"), fk_join("U2.UserID = U.ID")])
 
 
 @parametrize("completer", completers(casing=False))
@@ -435,9 +389,7 @@ def test_suggested_join_conditions_with_
 )
 def test_suggested_join_conditions_with_invalid_table(completer, text, ref):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [alias("users"), alias(ref)]
-    )
+    assert completions_to_set(result) == completions_to_set([alias("users"), alias(ref)])
 
 
 @parametrize("completer", completers(casing=False, aliasing=False))
@@ -531,8 +483,7 @@ def test_aliased_joins(completer, text):
 def test_suggested_joins_quoted_schema_qualified_table(completer, text):
     result = get_result(completer, text)
     assert completions_to_set(result) == completions_to_set(
-        testdata.schemas_and_from_clause_items()
-        + [join('public.users ON users.id = "Users".userid')]
+        testdata.schemas_and_from_clause_items() + [join('public.users ON users.id = "Users".userid')]
     )
 
 
@@ -547,14 +498,12 @@ def test_suggested_joins_quoted_schema_q
 def test_suggested_aliases_after_on(completer, text):
     position = len("SELECT u.name, o.id FROM users u JOIN orders o ON ")
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            alias("u"),
-            name_join("o.id = u.id"),
-            name_join("o.email = u.email"),
-            alias("o"),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        alias("u"),
+        name_join("o.id = u.id"),
+        name_join("o.email = u.email"),
+        alias("o"),
+    ])
 
 
 @parametrize("completer", completers())
@@ -582,14 +531,12 @@ def test_suggested_aliases_after_on_righ
 def test_suggested_tables_after_on(completer, text):
     position = len("SELECT users.name, orders.id FROM users JOIN orders ON ")
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            name_join("orders.id = users.id"),
-            name_join("orders.email = users.email"),
-            alias("users"),
-            alias("orders"),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        name_join("orders.id = users.id"),
+        name_join("orders.email = users.email"),
+        alias("users"),
+        alias("orders"),
+    ])
 
 
 @parametrize("completer", completers(casing=False))
@@ -601,13 +548,9 @@ def test_suggested_tables_after_on(compl
     ],
 )
 def test_suggested_tables_after_on_right_side(completer, text):
-    position = len(
-        "SELECT users.name, orders.id FROM users JOIN orders ON orders.user_id = "
-    )
+    position = len("SELECT users.name, orders.id FROM users JOIN orders ON orders.user_id = ")
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        [alias("users"), alias("orders")]
-    )
+    assert completions_to_set(result) == completions_to_set([alias("users"), alias("orders")])
 
 
 @parametrize("completer", completers(casing=False))
@@ -620,9 +563,7 @@ def test_suggested_tables_after_on_right
 )
 def test_join_using_suggests_common_columns(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [column("id"), column("email")]
-    )
+    assert completions_to_set(result) == completions_to_set([column("id"), column("email")])
 
 
 @parametrize("completer", completers(casing=False))
@@ -638,9 +579,7 @@ def test_join_using_suggests_common_colu
 def test_join_using_suggests_from_last_table(completer, text):
     position = text.index("()") + 1
     result = get_result(completer, text, position)
-    assert completions_to_set(result) == completions_to_set(
-        [column("id"), column("email")]
-    )
+    assert completions_to_set(result) == completions_to_set([column("id"), column("email")])
 
 
 @parametrize("completer", completers(casing=False))
@@ -653,9 +592,7 @@ def test_join_using_suggests_from_last_t
 )
 def test_join_using_suggests_columns_after_first_column(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [column("id"), column("email")]
-    )
+    assert completions_to_set(result) == completions_to_set([column("id"), column("email")])
 
 
 @parametrize("completer", completers(casing=False, aliasing=False))
@@ -669,9 +606,7 @@ def test_join_using_suggests_columns_aft
 )
 def test_table_names_after_from(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.schemas_and_from_clause_items()
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.schemas_and_from_clause_items())
     assert [c.text for c in result] == [
         "public",
         "orders",
@@ -691,9 +626,7 @@ def test_table_names_after_from(complete
 @parametrize("completer", completers(casing=False, qualify=no_qual))
 def test_auto_escaped_col_names(completer):
     result = get_result(completer, 'SELECT  from "select"', len("SELECT "))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("select")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("select"))
 
 
 @parametrize("completer", completers(aliasing=False))
@@ -717,9 +650,7 @@ def test_allow_leading_double_quote_in_l
 )
 def test_suggest_datatype(text, completer):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.schemas() + testdata.types() + testdata.builtin_datatypes()
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.schemas() + testdata.types() + testdata.builtin_datatypes())
 
 
 @parametrize("completer", completers(casing=False))
@@ -731,19 +662,13 @@ def test_suggest_columns_from_escaped_ta
 @parametrize("completer", completers(casing=False, qualify=no_qual))
 def test_suggest_columns_from_set_returning_function(completer):
     result = get_result(completer, "select  from set_returning_func()", len("select "))
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns_functions_and_keywords("set_returning_func", typ="functions")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns_functions_and_keywords("set_returning_func", typ="functions"))
 
 
 @parametrize("completer", completers(casing=False))
 def test_suggest_columns_from_aliased_set_returning_function(completer):
-    result = get_result(
-        completer, "select f. from set_returning_func() f", len("select f.")
-    )
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("set_returning_func", typ="functions")
-    )
+    result = get_result(completer, "select f. from set_returning_func() f", len("select f."))
+    assert completions_to_set(result) == completions_to_set(testdata.columns("set_returning_func", typ="functions"))
 
 
 @parametrize("completer", completers(casing=False))
@@ -751,9 +676,7 @@ def test_join_functions_using_suggests_c
     text = """SELECT * FROM set_returning_func() f1
               INNER JOIN set_returning_func() f2 USING ("""
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.columns("set_returning_func", typ="functions")
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.columns("set_returning_func", typ="functions"))
 
 
 @parametrize("completer", completers(casing=False))
@@ -762,8 +685,7 @@ def test_join_functions_on_suggests_colu
               INNER JOIN set_returning_func() f2 ON f1."""
     result = get_result(completer, text)
     assert completions_to_set(result) == completions_to_set(
-        [name_join("y = f2.y"), name_join("x = f2.x")]
-        + testdata.columns("set_returning_func", typ="functions")
+        [name_join("y = f2.y"), name_join("x = f2.x")] + testdata.columns("set_returning_func", typ="functions")
     )
 
 
@@ -880,10 +802,7 @@ def test_wildcard_column_expansion_with_
 
     completions = get_result(completer, text, position)
 
-    cols = (
-        '"select".id, "select".insert, "select"."ABC", '
-        "u.id, u.parentid, u.email, u.first_name, u.last_name"
-    )
+    cols = '"select".id, "select".insert, "select"."ABC", u.id, u.parentid, u.email, u.first_name, u.last_name'
     expected = [wildcard_expansion(cols)]
     assert completions == expected
 
@@ -922,18 +841,14 @@ def test_suggest_columns_from_quoted_tab
 @parametrize("text", ["SELECT * FROM ", "SELECT * FROM Orders o CROSS JOIN "])
 def test_schema_or_visible_table_completion(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.schemas_and_from_clause_items()
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.schemas_and_from_clause_items())
 
 
 @parametrize("completer", completers(casing=False, aliasing=True))
 @parametrize("text", ["SELECT * FROM "])
 def test_table_aliases(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        testdata.schemas() + aliased_rels
-    )
+    assert completions_to_set(result) == completions_to_set(testdata.schemas() + aliased_rels)
 
 
 @parametrize("completer", completers(casing=False, aliasing=True))
@@ -965,43 +880,37 @@ def test_duplicate_table_aliases(complet
 @parametrize("text", ["SELECT * FROM Orders o CROSS JOIN "])
 def test_duplicate_aliases_with_casing(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [
-            schema("PUBLIC"),
-            table("Orders O2"),
-            table("Users U"),
-            table('"Users" U'),
-            table('"select" s'),
-            view("User_Emails UE"),
-            view("Functions F"),
-            function("_custom_fun() cf"),
-            function("Custom_Fun() CF"),
-            function("Custom_Func1() CF"),
-            function("custom_func2() cf"),
-            function(
-                "set_returning_func(x := , y := ) srf",
-                display="set_returning_func(x, y) srf",
-            ),
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        schema("PUBLIC"),
+        table("Orders O2"),
+        table("Users U"),
+        table('"Users" U'),
+        table('"select" s'),
+        view("User_Emails UE"),
+        view("Functions F"),
+        function("_custom_fun() cf"),
+        function("Custom_Fun() CF"),
+        function("Custom_Func1() CF"),
+        function("custom_func2() cf"),
+        function(
+            "set_returning_func(x := , y := ) srf",
+            display="set_returning_func(x, y) srf",
+        ),
+    ])
 
 
 @parametrize("completer", completers(casing=True, aliasing=True))
 @parametrize("text", ["SELECT * FROM "])
 def test_aliases_with_casing(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [schema("PUBLIC")] + cased_aliased_rels
-    )
+    assert completions_to_set(result) == completions_to_set([schema("PUBLIC")] + cased_aliased_rels)
 
 
 @parametrize("completer", completers(casing=True, aliasing=False))
 @parametrize("text", ["SELECT * FROM "])
 def test_table_casing(completer, text):
     result = get_result(completer, text)
-    assert completions_to_set(result) == completions_to_set(
-        [schema("PUBLIC")] + cased_rels
-    )
+    assert completions_to_set(result) == completions_to_set([schema("PUBLIC")] + cased_rels)
 
 
 @parametrize("completer", completers(casing=False))
@@ -1028,12 +937,10 @@ def test_suggest_cte_names(completer):
         SELECT * FROM
     """
     result = get_result(completer, text)
-    expected = completions_to_set(
-        [
-            Completion("cte1", 0, display_meta="table"),
-            Completion("cte2", 0, display_meta="table"),
-        ]
-    )
+    expected = completions_to_set([
+        Completion("cte1", 0, display_meta="table"),
+        Completion("cte2", 0, display_meta="table"),
+    ])
     assert expected <= completions_to_set(result)
 
 
@@ -1101,12 +1008,10 @@ def test_set_schema(completer):
 @parametrize("completer", completers())
 def test_special_name_completion(completer):
     result = get_result(completer, "\\t")
-    assert completions_to_set(result) == completions_to_set(
-        [
-            Completion(
-                text="\\timing",
-                start_position=-2,
-                display_meta="Toggle timing of commands.",
-            )
-        ]
-    )
+    assert completions_to_set(result) == completions_to_set([
+        Completion(
+            text="\\timing",
+            start_position=-2,
+            display_meta="Toggle timing of commands.",
+        )
+    ])
diff -pruN 4.3.0-3/tests/test_sqlcompletion.py 4.4.0-1/tests/test_sqlcompletion.py
--- 4.3.0-3/tests/test_sqlcompletion.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_sqlcompletion.py	2025-12-24 23:39:20.000000000 +0000
@@ -18,9 +18,7 @@ from pgcli.packages.parseutils.tables im
 import pytest
 
 
-def cols_etc(
-    table, schema=None, alias=None, is_function=False, parent=None, last_keyword=None
-):
+def cols_etc(table, schema=None, alias=None, is_function=False, parent=None, last_keyword=None):
     """Returns the expected select-clause suggestions for a single-table
     select."""
     return {
@@ -46,7 +44,7 @@ def test_select_suggests_cols_with_quali
 def test_cte_does_not_crash():
     sql = "WITH CTE AS (SELECT F.* FROM Foo F WHERE F.Bar > 23) SELECT C.* FROM CTE C WHERE C.FooID BETWEEN 123 AND 234;"
     for i in range(len(sql)):
-        suggestions = suggest_type(sql[: i + 1], sql[: i + 1])
+        suggest_type(sql[: i + 1], sql[: i + 1])
 
 
 @pytest.mark.parametrize("expression", ['SELECT * FROM "tabl" WHERE '])
@@ -117,9 +115,7 @@ def test_select_suggests_cols_and_funcs(
     }
 
 
-@pytest.mark.parametrize(
-    "expression", ["INSERT INTO ", "COPY ", "UPDATE ", "DESCRIBE "]
-)
+@pytest.mark.parametrize("expression", ["INSERT INTO ", "COPY ", "UPDATE ", "DESCRIBE "])
 def test_suggests_tables_views_and_schemas(expression):
     suggestions = suggest_type(expression, expression)
     assert set(suggestions) == {Table(schema=None), View(schema=None), Schema()}
@@ -140,7 +136,7 @@ def test_suggest_tables_views_schemas_an
 )
 def test_suggest_after_join_with_two_tables(expression):
     suggestions = suggest_type(expression, expression)
-    tables = tuple([(None, "foo", None, False), (None, "bar", None, False)])
+    tables = ((None, "foo", None, False), (None, "bar", None, False))
     assert set(suggestions) == {
         FromClauseItem(schema=None, table_refs=tables),
         Join(tables, None),
@@ -148,9 +144,7 @@ def test_suggest_after_join_with_two_tab
     }
 
 
-@pytest.mark.parametrize(
-    "expression", ["SELECT * FROM foo JOIN ", "SELECT * FROM foo JOIN bar"]
-)
+@pytest.mark.parametrize("expression", ["SELECT * FROM foo JOIN ", "SELECT * FROM foo JOIN bar"])
 def test_suggest_after_join_with_one_table(expression):
     suggestions = suggest_type(expression, expression)
     tables = ((None, "foo", None, False),)
@@ -161,9 +155,7 @@ def test_suggest_after_join_with_one_tab
     }
 
 
-@pytest.mark.parametrize(
-    "expression", ["INSERT INTO sch.", "COPY sch.", "DESCRIBE sch."]
-)
+@pytest.mark.parametrize("expression", ["INSERT INTO sch.", "COPY sch.", "DESCRIBE sch."])
 def test_suggest_qualified_tables_and_views(expression):
     suggestions = suggest_type(expression, expression)
     assert set(suggestions) == {Table(schema="sch"), View(schema="sch")}
@@ -193,7 +185,7 @@ def test_suggest_qualified_tables_views_
 @pytest.mark.parametrize("expression", ["SELECT * FROM foo JOIN sch."])
 def test_suggest_qualified_tables_views_functions_and_joins(expression):
     suggestions = suggest_type(expression, expression)
-    tbls = tuple([(None, "foo", None, False)])
+    tbls = ((None, "foo", None, False),)
     assert set(suggestions) == {
         FromClauseItem(schema="sch", table_refs=tbls),
         Join(tbls, "sch"),
@@ -210,9 +202,7 @@ def test_truncate_suggests_qualified_tab
     assert set(suggestions) == {Table(schema="sch")}
 
 
-@pytest.mark.parametrize(
-    "text", ["SELECT DISTINCT ", "INSERT INTO foo SELECT DISTINCT "]
-)
+@pytest.mark.parametrize("text", ["SELECT DISTINCT ", "INSERT INTO foo SELECT DISTINCT "])
 def test_distinct_suggests_cols(text):
     suggestions = suggest_type(text, text)
     assert set(suggestions) == {
@@ -233,9 +223,7 @@ def test_distinct_suggests_cols(text):
         ),
     ],
 )
-def test_distinct_and_order_by_suggestions_with_aliases(
-    text, text_before, last_keyword
-):
+def test_distinct_and_order_by_suggestions_with_aliases(text, text_before, last_keyword):
     suggestions = suggest_type(text, text_before)
     assert set(suggestions) == {
         Column(
@@ -309,34 +297,24 @@ def test_into_suggests_tables_and_schema
     assert set(suggestion) == {Table(schema=None), View(schema=None), Schema()}
 
 
-@pytest.mark.parametrize(
-    "text", ["INSERT INTO abc (", "INSERT INTO abc () SELECT * FROM hij;"]
-)
+@pytest.mark.parametrize("text", ["INSERT INTO abc (", "INSERT INTO abc () SELECT * FROM hij;"])
 def test_insert_into_lparen_suggests_cols(text):
     suggestions = suggest_type(text, "INSERT INTO abc (")
-    assert suggestions == (
-        Column(table_refs=((None, "abc", None, False),), context="insert"),
-    )
+    assert suggestions == (Column(table_refs=((None, "abc", None, False),), context="insert"),)
 
 
 def test_insert_into_lparen_partial_text_suggests_cols():
     suggestions = suggest_type("INSERT INTO abc (i", "INSERT INTO abc (i")
-    assert suggestions == (
-        Column(table_refs=((None, "abc", None, False),), context="insert"),
-    )
+    assert suggestions == (Column(table_refs=((None, "abc", None, False),), context="insert"),)
 
 
 def test_insert_into_lparen_comma_suggests_cols():
     suggestions = suggest_type("INSERT INTO abc (id,", "INSERT INTO abc (id,")
-    assert suggestions == (
-        Column(table_refs=((None, "abc", None, False),), context="insert"),
-    )
+    assert suggestions == (Column(table_refs=((None, "abc", None, False),), context="insert"),)
 
 
 def test_partially_typed_col_name_suggests_col_names():
-    suggestions = suggest_type(
-        "SELECT * FROM tabl WHERE col_n", "SELECT * FROM tabl WHERE col_n"
-    )
+    suggestions = suggest_type("SELECT * FROM tabl WHERE col_n", "SELECT * FROM tabl WHERE col_n")
     assert set(suggestions) == cols_etc("tabl", last_keyword="WHERE")
 
 
@@ -389,9 +367,7 @@ def test_dot_suggests_cols_of_an_alias_w
 
 
 def test_dot_col_comma_suggests_cols_or_schema_qualified_table():
-    suggestions = suggest_type(
-        "SELECT t1.a, t2. FROM tabl1 t1, tabl2 t2", "SELECT t1.a, t2."
-    )
+    suggestions = suggest_type("SELECT t1.a, t2. FROM tabl1 t1, tabl2 t2", "SELECT t1.a, t2.")
     assert set(suggestions) == {
         Column(table_refs=((None, "tabl2", "t2", False),)),
         Table(schema="t2"),
@@ -452,14 +428,12 @@ def test_sub_select_table_name_completio
 )
 def test_sub_select_table_name_completion_with_outer_table(expression):
     suggestion = suggest_type(expression, expression)
-    tbls = tuple([(None, "foo", None, False)])
+    tbls = ((None, "foo", None, False),)
     assert set(suggestion) == {FromClauseItem(schema=None, table_refs=tbls), Schema()}
 
 
 def test_sub_select_col_name_completion():
-    suggestions = suggest_type(
-        "SELECT * FROM (SELECT  FROM abc", "SELECT * FROM (SELECT "
-    )
+    suggestions = suggest_type("SELECT * FROM (SELECT  FROM abc", "SELECT * FROM (SELECT ")
     assert set(suggestions) == {
         Column(table_refs=((None, "abc", None, False),), qualifiable=True),
         Function(schema=None),
@@ -469,16 +443,12 @@ def test_sub_select_col_name_completion(
 
 @pytest.mark.xfail
 def test_sub_select_multiple_col_name_completion():
-    suggestions = suggest_type(
-        "SELECT * FROM (SELECT a, FROM abc", "SELECT * FROM (SELECT a, "
-    )
+    suggestions = suggest_type("SELECT * FROM (SELECT a, FROM abc", "SELECT * FROM (SELECT a, ")
     assert set(suggestions) == cols_etc("abc")
 
 
 def test_sub_select_dot_col_name_completion():
-    suggestions = suggest_type(
-        "SELECT * FROM (SELECT t. FROM tabl t", "SELECT * FROM (SELECT t."
-    )
+    suggestions = suggest_type("SELECT * FROM (SELECT t. FROM tabl t", "SELECT * FROM (SELECT t.")
     assert set(suggestions) == {
         Column(table_refs=((None, "tabl", "t", False),)),
         Table(schema="t"),
@@ -492,7 +462,7 @@ def test_sub_select_dot_col_name_complet
 def test_join_suggests_tables_and_schemas(tbl_alias, join_type):
     text = f"SELECT * FROM abc {tbl_alias} {join_type} JOIN "
     suggestion = suggest_type(text, text)
-    tbls = tuple([(None, "abc", tbl_alias or None, False)])
+    tbls = ((None, "abc", tbl_alias or None, False),)
     assert set(suggestion) == {
         FromClauseItem(schema=None, table_refs=tbls),
         Schema(),
@@ -505,7 +475,7 @@ def test_left_join_with_comma():
     suggestions = suggest_type(text, text)
     # tbls should also include (None, 'bar', 'b', False)
     # but there's a bug with commas
-    tbls = tuple([(None, "foo", "f", False)])
+    tbls = ((None, "foo", "f", False),)
     assert set(suggestions) == {FromClauseItem(schema=None, table_refs=tbls), Schema()}
 
 
@@ -627,9 +597,7 @@ def test_on_suggests_tables_and_join_con
 )
 def test_join_using_suggests_common_columns(text):
     tables = ((None, "abc", None, False), (None, "def", None, False))
-    assert set(suggest_type(text, text)) == {
-        Column(table_refs=tables, require_last_table=True)
-    }
+    assert set(suggest_type(text, text)) == {Column(table_refs=tables, require_last_table=True)}
 
 
 def test_suggest_columns_after_multiple_joins():
@@ -643,14 +611,10 @@ def test_suggest_columns_after_multiple_
 
 
 def test_2_statements_2nd_current():
-    suggestions = suggest_type(
-        "select * from a; select * from ", "select * from a; select * from "
-    )
+    suggestions = suggest_type("select * from a; select * from ", "select * from a; select * from ")
     assert set(suggestions) == {FromClauseItem(schema=None), Schema()}
 
-    suggestions = suggest_type(
-        "select * from a; select  from b", "select * from a; select "
-    )
+    suggestions = suggest_type("select * from a; select  from b", "select * from a; select ")
     assert set(suggestions) == {
         Column(table_refs=((None, "b", None, False),), qualifiable=True),
         Function(schema=None),
@@ -658,9 +622,7 @@ def test_2_statements_2nd_current():
     }
 
     # Should work even if first statement is invalid
-    suggestions = suggest_type(
-        "select * from; select * from ", "select * from; select * from "
-    )
+    suggestions = suggest_type("select * from; select * from ", "select * from; select * from ")
     assert set(suggestions) == {FromClauseItem(schema=None), Schema()}
 
 
@@ -679,9 +641,7 @@ def test_3_statements_2nd_current():
     )
     assert set(suggestions) == {FromClauseItem(schema=None), Schema()}
 
-    suggestions = suggest_type(
-        "select * from a; select  from b; select * from c", "select * from a; select "
-    )
+    suggestions = suggest_type("select * from a; select  from b; select * from c", "select * from a; select ")
     assert set(suggestions) == cols_etc("b", last_keyword="SELECT")
 
 
@@ -773,9 +733,7 @@ def test_statements_with_cursor_before_f
 
 
 def test_create_db_with_template():
-    suggestions = suggest_type(
-        "create database foo with template ", "create database foo with template "
-    )
+    suggestions = suggest_type("create database foo with template ", "create database foo with template ")
 
     assert set(suggestions) == {Database()}
 
@@ -814,9 +772,7 @@ def test_cast_operator_suggests_types(te
     }
 
 
-@pytest.mark.parametrize(
-    "text", ["SELECT foo::bar.", "SELECT foo::bar.baz", "SELECT (x + y)::bar."]
-)
+@pytest.mark.parametrize("text", ["SELECT foo::bar.", "SELECT foo::bar.baz", "SELECT (x + y)::bar."])
 def test_cast_operator_suggests_schema_qualified_types(text):
     assert set(suggest_type(text, text)) == {
         Datatype(schema="bar"),
@@ -962,3 +918,13 @@ def test_handle_unrecognized_kw_generous
 @pytest.mark.parametrize("sql", ["ALTER ", "ALTER TABLE foo ALTER "])
 def test_keyword_after_alter(sql):
     assert Keyword("ALTER") in set(suggest_type(sql, sql))
+
+
+def test_suggestion_when_setting_search_path():
+    sql_set = "SET "
+    suggestion_set = suggest_type(sql_set, sql_set)
+    assert set(suggestion_set) == {Keyword("SET")}
+
+    sql_set_search_path_to = "SET search_path TO "
+    suggestion_set_search_path_to = suggest_type(sql_set_search_path_to, sql_set_search_path_to)
+    assert set(suggestion_set_search_path_to) == {Schema()}
diff -pruN 4.3.0-3/tests/test_ssh_tunnel.py 4.4.0-1/tests/test_ssh_tunnel.py
--- 4.3.0-3/tests/test_ssh_tunnel.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/test_ssh_tunnel.py	2025-12-24 23:39:20.000000000 +0000
@@ -12,9 +12,7 @@ from pgcli.pgexecute import PGExecute
 
 @pytest.fixture
 def mock_ssh_tunnel_forwarder() -> MagicMock:
-    mock_ssh_tunnel_forwarder = MagicMock(
-        SSHTunnelForwarder, local_bind_ports=[1111], autospec=True
-    )
+    mock_ssh_tunnel_forwarder = MagicMock(SSHTunnelForwarder, local_bind_ports=[1111], autospec=True)
     with patch(
         "pgcli.main.sshtunnel.SSHTunnelForwarder",
         return_value=mock_ssh_tunnel_forwarder,
@@ -28,9 +26,7 @@ def mock_pgexecute() -> MagicMock:
         yield mock_pgexecute
 
 
-def test_ssh_tunnel(
-    mock_ssh_tunnel_forwarder: MagicMock, mock_pgexecute: MagicMock
-) -> None:
+def test_ssh_tunnel(mock_ssh_tunnel_forwarder: MagicMock, mock_pgexecute: MagicMock) -> None:
     # Test with just a host
     tunnel_url = "some.host"
     db_params = {
@@ -103,18 +99,12 @@ def test_ssh_tunnel(
     mock_pgexecute.reset_mock()
 
     # Test with DSN
-    dsn = (
-        f"user={db_params['user']} password={db_params['passwd']} "
-        f"host={db_params['host']} port={db_params['port']}"
-    )
+    dsn = f"user={db_params['user']} password={db_params['passwd']} host={db_params['host']} port={db_params['port']}"
 
     pgcli = PGCli(ssh_tunnel_url=tunnel_url)
     pgcli.connect(dsn=dsn)
 
-    expected_dsn = (
-        f"user={db_params['user']} password={db_params['passwd']} "
-        f"host=127.0.0.1 port={pgcli.ssh_tunnel.local_bind_ports[0]}"
-    )
+    expected_dsn = f"user={db_params['user']} password={db_params['passwd']} host=127.0.0.1 port={pgcli.ssh_tunnel.local_bind_ports[0]}"
 
     mock_ssh_tunnel_forwarder.assert_called_once_with(**expected_tunnel_params)
     mock_pgexecute.assert_called_once()
@@ -126,18 +116,14 @@ def test_ssh_tunnel(
 def test_cli_with_tunnel() -> None:
     runner = CliRunner()
     tunnel_url = "mytunnel"
-    with patch.object(
-        PGCli, "__init__", autospec=True, return_value=None
-    ) as mock_pgcli:
+    with patch.object(PGCli, "__init__", autospec=True, return_value=None) as mock_pgcli:
         runner.invoke(cli, ["--ssh-tunnel", tunnel_url])
         mock_pgcli.assert_called_once()
         call_args, call_kwargs = mock_pgcli.call_args
         assert call_kwargs["ssh_tunnel_url"] == tunnel_url
 
 
-def test_config(
-    tmpdir: os.PathLike, mock_ssh_tunnel_forwarder: MagicMock, mock_pgexecute: MagicMock
-) -> None:
+def test_config(tmpdir: os.PathLike, mock_ssh_tunnel_forwarder: MagicMock, mock_pgexecute: MagicMock) -> None:
     pgclirc = str(tmpdir.join("rcfile"))
 
     tunnel_user = "tunnel_user"
diff -pruN 4.3.0-3/tests/utils.py 4.4.0-1/tests/utils.py
--- 4.3.0-3/tests/utils.py	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tests/utils.py	2025-12-24 23:39:20.000000000 +0000
@@ -27,7 +27,7 @@ try:
     SERVER_VERSION = conn.info.parameter_status("server_version")
     JSON_AVAILABLE = True
     JSONB_AVAILABLE = True
-except Exception as x:
+except Exception:
     CAN_CONNECT_TO_DB = JSON_AVAILABLE = JSONB_AVAILABLE = False
     SERVER_VERSION = 0
 
@@ -38,21 +38,17 @@ dbtest = pytest.mark.skipif(
 )
 
 
-requires_json = pytest.mark.skipif(
-    not JSON_AVAILABLE, reason="Postgres server unavailable or json type not defined"
-)
+requires_json = pytest.mark.skipif(not JSON_AVAILABLE, reason="Postgres server unavailable or json type not defined")
 
 
-requires_jsonb = pytest.mark.skipif(
-    not JSONB_AVAILABLE, reason="Postgres server unavailable or jsonb type not defined"
-)
+requires_jsonb = pytest.mark.skipif(not JSONB_AVAILABLE, reason="Postgres server unavailable or jsonb type not defined")
 
 
 def create_db(dbname):
     with db_connection().cursor() as cur:
         try:
             cur.execute("""CREATE DATABASE _test_db""")
-        except:
+        except Exception:
             pass
 
 
@@ -67,16 +63,12 @@ def drop_tables(conn):
         )
 
 
-def run(
-    executor, sql, join=False, expanded=False, pgspecial=None, exception_formatter=None
-):
+def run(executor, sql, join=False, expanded=False, pgspecial=None, exception_formatter=None):
     "Return string output for the sql to be run"
 
     results = executor.run(sql, pgspecial, exception_formatter)
     formatted = []
-    settings = OutputSettings(
-        table_format="psql", dcmlfmt="d", floatfmt="g", expanded=expanded
-    )
+    settings = OutputSettings(table_format="psql", dcmlfmt="d", floatfmt="g", expanded=expanded)
     for title, rows, headers, status, sql, success, is_special in results:
         formatted.extend(format_output(title, rows, headers, status, settings))
     if join:
@@ -86,7 +78,4 @@ def run(
 
 
 def completions_to_set(completions):
-    return {
-        (completion.display_text, completion.display_meta_text)
-        for completion in completions
-    }
+    return {(completion.display_text, completion.display_meta_text) for completion in completions}
diff -pruN 4.3.0-3/tox.ini 4.4.0-1/tox.ini
--- 4.3.0-3/tox.ini	2025-03-22 22:01:50.000000000 +0000
+++ 4.4.0-1/tox.ini	2025-12-24 23:39:20.000000000 +0000
@@ -1,14 +1,31 @@
 [tox]
-envlist = py39, py310, py311, py312, py313
+envlist = py
+
 [testenv]
-deps = pytest>=2.7.0,<=3.0.7
-    mock>=1.0.1
-    behave>=1.2.4
-    pexpect==3.3
-    sshtunnel>=0.4.0
-commands = py.test
-    behave tests/features
+skip_install = true
+deps = uv
+commands = uv pip install -e .[dev]
+        coverage run -m pytest -v tests
+        coverage report -m
 passenv = PGHOST
     PGPORT
     PGUSER
     PGPASSWORD
+
+[testenv:style]
+skip_install = true
+deps = ruff
+commands = ruff check
+        ruff format --diff
+
+[testenv:integration]
+skip_install = true
+deps = uv
+commands = uv pip install -e .[dev]
+        behave tests/features --no-capture
+
+[testenv:rest]
+skip_install = true
+deps = uv
+commands = uv pip install -e .[dev]
+        docutils --halt=warning changelog.rst
