diff -pruN 1.10.6-3/debian/changelog 1.23.0-0ubuntu1/debian/changelog
--- 1.10.6-3/debian/changelog	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/changelog	2021-12-07 16:44:13.000000000 +0000
@@ -1,23 +1,22 @@
-pg8000 (1.10.6-3) unstable; urgency=medium
+pg8000 (1.23.0-0ubuntu1) jammy; urgency=medium
 
-  [ Debian Janitor ]
-  * Bump debhelper from old 10 to 12.
-  * Update renamed lintian tag names in lintian overrides.
-  * Set upstream metadata fields: Bug-Database, Bug-Submit, Repository,
-    Repository-Browse.
-
-  [ Ondřej Nový ]
-  * d/control: Update Maintainer field with new Debian Python Team
-    contact address.
-  * d/control: Update Vcs-* fields with new Debian Python Team Salsa
-    layout.
-
-  [ Debian Janitor ]
-  * Apply multi-arch hints.
-    + python-pg8000-doc: Add Multi-Arch: foreign.
-  * Bump debhelper from old 12 to 13.
+  * d/gbp.conf: Update gbp configuration file.
+  * d/control: Update Vcs-* links and maintainers.
+  * New upstream release.
+  * d/control: Align (Build-)Depends with upstream.
+  * d/p/0001-Don-t-try-to-encode-non-text.patch: Dropped. No longer needed.
+  * d/python3-pg8000.install, d/python-pg8000-doc.*: The docs directory
+    has been consolidated to README.adoc.
+  * d/rules: Don't run tests during build. The tests didn't run in
+    the past and require a running PostgreSQL database.
 
- -- Sandro Tosi <morph@debian.org>  Wed, 04 May 2022 17:47:06 -0400
+ -- Corey Bryant <corey.bryant@canonical.com>  Tue, 07 Dec 2021 11:44:13 -0500
+
+pg8000 (1.10.6-2build1) focal; urgency=medium
+
+  * No-change rebuild to generate dependencies on python2.
+
+ -- Matthias Klose <doko@ubuntu.com>  Tue, 17 Dec 2019 12:52:30 +0000
 
 pg8000 (1.10.6-2) unstable; urgency=medium
 
diff -pruN 1.10.6-3/debian/control 1.23.0-0ubuntu1/debian/control
--- 1.10.6-3/debian/control	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/control	2021-12-07 16:44:13.000000000 +0000
@@ -1,27 +1,30 @@
 Source: pg8000
 Section: python
 Priority: optional
-Maintainer: Debian Python Team <team+python@tracker.debian.org>
+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
+XSBC-Original-Maintainer: Debian Python Modules Team <python-modules-team@lists.alioth.debian.org>
 Uploaders:
  Dominik George <nik@naturalnet.de>,
  Rahul Amaram <amaramrahul@users.sourceforge.net>,
 Build-Depends:
- debhelper-compat (= 13),
+ debhelper-compat (= 10),
  dh-exec,
  dh-python,
  python3-all,
  python3-setuptools,
- python3-six,
- python3-sphinx,
+Build-Depends-Indep:
+ python3-pytest,
+ python3-scramp,
 Standards-Version: 3.9.8
 Testsuite: autopkgtest-pkg-python
 Homepage: https://github.com/mfenniak/pg8000
-Vcs-Git: https://salsa.debian.org/python-team/packages/pg8000.git
-Vcs-Browser: https://salsa.debian.org/python-team/packages/pg8000
+Vcs-Git: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/pg8000
+Vcs-Browser: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/pg8000
 
 Package: python3-pg8000
 Architecture: all
 Depends:
+ python3-scramp,
  ${misc:Depends},
  ${python3:Depends},
 Suggests:
@@ -44,8 +47,6 @@ Architecture: all
 Section: doc
 Depends:
  ${misc:Depends},
- ${sphinxdoc:Depends},
-Multi-Arch: foreign
 Description: Pure-Python PostgreSQL Driver (documentation)
  pg8000 is a Pure-Python interface to the PostgreSQL database engine.  It is
  one of many PostgreSQL interfaces for the Python programming language. pg8000
diff -pruN 1.10.6-3/debian/gbp.conf 1.23.0-0ubuntu1/debian/gbp.conf
--- 1.10.6-3/debian/gbp.conf	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/gbp.conf	2021-12-07 16:44:13.000000000 +0000
@@ -0,0 +1,7 @@
+[DEFAULT]
+debian-branch = master
+upstream-tag = %(version)s
+pristine-tar = True
+
+[buildpackage]
+export-dir = ../build-area
diff -pruN 1.10.6-3/debian/patches/0001-Don-t-try-to-encode-non-text.patch 1.23.0-0ubuntu1/debian/patches/0001-Don-t-try-to-encode-non-text.patch
--- 1.10.6-3/debian/patches/0001-Don-t-try-to-encode-non-text.patch	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/patches/0001-Don-t-try-to-encode-non-text.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-From 52ec28329c68846c1571b9c708e3c4e012aeff2b Mon Sep 17 00:00:00 2001
-From: Ximin Luo <infinity0@debian.org>
-Date: Wed, 12 Oct 2016 10:18:44 +0200
-Subject: Don't try to encode non-text
-
- pg8000 when used by calendarserver sometimes passes unicode objects into
- text_out which causes an exception. This patch fixes that; it should not
- break anything else, but perhaps a wider fix is more appropiate - awaiting
- reply from upstream on that.
-Bug: https://github.com/mfenniak/pg8000/issues/93
----
- pg8000/core.py | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/pg8000/core.py b/pg8000/core.py
-index fe8507c..78429dd 100644
---- a/pg8000/core.py
-+++ b/pg8000/core.py
-@@ -1332,6 +1332,8 @@ class Connection(object):
-         self.ParameterStatusReceived += self.handle_PARAMETER_STATUS
- 
-         def text_out(v):
-+            if not isinstance(v, text_type):
-+                return v
-             return v.encode(self._client_encoding)
- 
-         def time_out(v):
diff -pruN 1.10.6-3/debian/patches/series 1.23.0-0ubuntu1/debian/patches/series
--- 1.10.6-3/debian/patches/series	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/patches/series	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-0001-Don-t-try-to-encode-non-text.patch
diff -pruN 1.10.6-3/debian/python3-pg8000.install 1.23.0-0ubuntu1/debian/python3-pg8000.install
--- 1.10.6-3/debian/python3-pg8000.install	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/python3-pg8000.install	2021-12-07 16:44:13.000000000 +0000
@@ -1,2 +1,2 @@
 #!/usr/bin/dh-exec
-doc/release_notes.rst => usr/share/doc/python3-pg8000/changelog
+README.adoc => usr/share/doc/python3-pg8000/README.adoc
diff -pruN 1.10.6-3/debian/python-pg8000-doc.docs 1.23.0-0ubuntu1/debian/python-pg8000-doc.docs
--- 1.10.6-3/debian/python-pg8000-doc.docs	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/python-pg8000-doc.docs	2021-12-07 16:44:13.000000000 +0000
@@ -1 +1 @@
-build/sphinx/html/
+README.adoc
diff -pruN 1.10.6-3/debian/python-pg8000-doc.links 1.23.0-0ubuntu1/debian/python-pg8000-doc.links
--- 1.10.6-3/debian/python-pg8000-doc.links	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/python-pg8000-doc.links	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-usr/share/doc/python-pg8000-doc/html/_sources/release_notes.rst usr/share/doc/python-pg8000-doc/changelog
diff -pruN 1.10.6-3/debian/python-pg8000-doc.lintian-overrides 1.23.0-0ubuntu1/debian/python-pg8000-doc.lintian-overrides
--- 1.10.6-3/debian/python-pg8000-doc.lintian-overrides	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/python-pg8000-doc.lintian-overrides	1970-01-01 00:00:00.000000000 +0000
@@ -1,2 +0,0 @@
-# it's a symlink to a sphinxdoc source file
-python-pg8000-doc binary: changelog-file-not-compressed changelog
diff -pruN 1.10.6-3/debian/rules 1.23.0-0ubuntu1/debian/rules
--- 1.10.6-3/debian/rules	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/rules	2021-12-07 16:44:13.000000000 +0000
@@ -7,8 +7,9 @@
 export PYBUILD_NAME=pg8000
 
 %:
-	dh $@ --with python3,sphinxdoc --buildsystem=pybuild
+	dh $@ --with python3 --buildsystem=pybuild
 
-override_dh_installdocs:
-	python3 setup.py build_sphinx
-	dh_installdocs
+override_dh_auto_test:
+	# The tests provided for pg8000 are functional tests
+	# that require a running PostgreSQL database.
+	echo "Do nothing..."
diff -pruN 1.10.6-3/debian/source/lintian-overrides 1.23.0-0ubuntu1/debian/source/lintian-overrides
--- 1.10.6-3/debian/source/lintian-overrides	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/source/lintian-overrides	2021-12-07 16:44:13.000000000 +0000
@@ -1,2 +1,2 @@
 # Upstream does not sign tarballs. Discussing that, though.
-pg8000 source: debian-watch-does-not-check-gpg-signature
+pg8000 source: debian-watch-may-check-gpg-signature
diff -pruN 1.10.6-3/debian/upstream/metadata 1.23.0-0ubuntu1/debian/upstream/metadata
--- 1.10.6-3/debian/upstream/metadata	2022-05-04 21:47:06.000000000 +0000
+++ 1.23.0-0ubuntu1/debian/upstream/metadata	1970-01-01 00:00:00.000000000 +0000
@@ -1,4 +0,0 @@
-Bug-Database: https://github.com/mfenniak/pg8000/issues
-Bug-Submit: https://github.com/mfenniak/pg8000/issues/new
-Repository: https://github.com/mfenniak/pg8000.git
-Repository-Browse: https://github.com/mfenniak/pg8000
diff -pruN 1.10.6-3/doc/conf.py 1.23.0-0ubuntu1/doc/conf.py
--- 1.10.6-3/doc/conf.py	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/conf.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,180 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# pg8000 documentation build configuration file, created by
-# sphinx-quickstart on Mon Sep 15 09:38:48 2008.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# The contents of this file are pickled, so don't put values in the namespace
-# that aren't pickleable (module imports are okay, they're removed automatically).
-#
-# All configuration values have a default value; values that are commented out
-# serve to show the default value.
-
-import sys, os
-# If your extensions are in another directory, add it here. If the directory
-# is relative to the documentation root, use os.path.abspath to make it
-# absolute, like shown here.
-sys.path.append(os.path.abspath('../pg8000'))
-
-# General configuration
-# ---------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['.templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General substitutions.
-project = 'pg8000'
-copyright = '2008, Mathieu Fenniak'
-
-# 'version' and 'release' are set in setup.py
-#
-# The default replacements for |version| and |release|, also used in various
-# other places throughout the built documents.
-#
-# The short X.Y version.
-#version = ''
-# The full version, including alpha/beta/rc tags.
-#release = ''
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directories, that shouldn't be searched
-# for source files.
-#exclude_dirs = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-
-# Options for HTML output
-# -----------------------
-
-# The style sheet to use for HTML and HTML Help pages. A file of that name
-# must exist either in Sphinx' static/ path, or in one of the custom paths
-# given in html_static_path.
-html_style = 'default.css'
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (within the static path) to place at the top of
-# the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['.static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, the reST sources are included in the HTML build as _sources/<name>.
-#html_copy_source = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'pg8000doc'
-
-
-# Options for LaTeX output
-# ------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, document class [howto/manual]).
-latex_documents = [
-  ('index', 'pg8000.tex', 'pg8000 Documentation',
-   'Mathieu Fenniak', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
diff -pruN 1.10.6-3/doc/dbapi.rst 1.23.0-0ubuntu1/doc/dbapi.rst
--- 1.10.6-3/doc/dbapi.rst	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/dbapi.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,110 +0,0 @@
-.. module:: pg8000
-
-API Reference for pg8000.
-
-Properties
-----------
-.. autodata:: __version__
-   :annotation:
-
-.. autodata:: apilevel
-   :annotation:
-    
-.. autodata:: threadsafety
-   :annotation:
-
-.. autodata:: paramstyle
-   :annotation:
-
-.. autodata:: STRING
-   :annotation:
-
-.. attribute:: BINARY
-
-.. autodata:: NUMBER
-   :annotation:
-
-.. autodata:: DATETIME
-   :annotation:
-
-.. autodata:: ROWID
-   :annotation:
-
-
-Functions
----------
-
-.. autofunction:: connect
-
-.. autofunction:: Date
-
-.. autofunction:: Time
-
-.. autofunction:: Timestamp
-
-.. autofunction:: DateFromTicks
-
-.. autofunction:: TimeFromTicks
-
-.. autofunction:: TimestampFromTicks
-
-.. autofunction:: Binary
-
-
-Generic Exceptions
-------------------
-pg8000 uses the standard DBAPI 2.0 exception tree as "generic" exceptions.
-Generally, more specific exception types are raised; these specific exception
-types are derived from the generic exceptions.
-
-.. autoexception:: Warning
-
-.. autoexception:: Error
-
-.. autoexception:: InterfaceError
-
-.. autoexception:: DatabaseError
-
-.. autoexception:: DataError
-
-.. autoexception:: OperationalError
-
-.. autoexception:: IntegrityError
-
-.. autoexception:: InternalError
-
-.. autoexception:: ProgrammingError
-
-.. autoexception:: NotSupportedError
-
-
-Specific Exceptions
--------------------
-    
-Exceptions that are subclassed from the standard DB-API 2.0 exceptions above.
-
-.. autoexception:: ArrayContentNotSupportedError
-
-.. autoexception:: ArrayContentNotHomogenousError
-
-.. autoexception:: ArrayContentEmptyError
-
-.. autoexception:: ArrayDimensionsNotConsistentError
-
-
-Classes
--------
-
-.. autoclass:: Connection()
-   :members:
-
-.. autoclass:: Cursor()
-   :members:
-
-
-Type Classes
-------------
-
-.. autoclass:: Bytea
-
-.. autoclass:: Interval
diff -pruN 1.10.6-3/doc/index.rst 1.23.0-0ubuntu1/doc/index.rst
--- 1.10.6-3/doc/index.rst	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/index.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,39 +0,0 @@
-##########
-  pg8000
-##########
-
-pg8000 is a `DB-API 2.0 <http://www.python.org/dev/peps/pep-0249/>`_ compatible
-pure-Python interface to the `PostgreSQL <http://www.postgresql.org/>`_
-database engine. It is one of many PostgreSQL interfaces for the Python
-programming language. pg8000 is somewhat distinctive in that it is written
-entirely in Python and does not rely on any external libraries (such as a
-compiled python module, or PostgreSQL's libpq library).
-
-pg8000's name comes from the belief that it is probably about the 8000th
-PostgreSQL interface for Python. pg8000 is distributed under the terms of the 
-BSD 3-clause license.
-
-All bug reports, feature requests and contributions are welcome at
-http://github.com/mfenniak/pg8000/. There's also a forum at https://groups.google.com/forum/#!forum/pg8000.
-
-If you're a user of pg8000 we'd love to hear who you are and what you or your
-organization is using pg8000 for. Just log an issue on GitHub or post a message
-to the forum. The idea is to put together a page of users.
-
-Contents
---------
-
-.. toctree::
-    :maxdepth: 2
-    
-    quickstart
-    dbapi
-    types
-    release_notes
-
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`search`
diff -pruN 1.10.6-3/doc/Makefile 1.23.0-0ubuntu1/doc/Makefile
--- 1.10.6-3/doc/Makefile	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/Makefile	1970-01-01 00:00:00.000000000 +0000
@@ -1,177 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = build
-
-# User-friendly check for sphinx-build
-ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
-$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
-endif
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  xml        to make Docutils-native XML files"
-	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pg8000.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pg8000.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/pg8000"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pg8000"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-latexpdfja:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through platex and dvipdfmx..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
-
-xml:
-	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
-	@echo
-	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
-
-pseudoxml:
-	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
-	@echo
-	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff -pruN 1.10.6-3/doc/quickstart.rst 1.23.0-0ubuntu1/doc/quickstart.rst
--- 1.10.6-3/doc/quickstart.rst	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/quickstart.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,114 +0,0 @@
-Quick Start
-===========
-
-Key Points
-----------
-
-- Runs on Python versions 2.7+ and 3.3+
-- Runs on CPython, Jython and PyPy
-- Although it's possible for threads to share cursors and connections, for
-  performance reasons it's best to use one thread per connection.
-- Internally, all queries use prepared statements. pg8000 remembers that a
-  prepared statement has been created, and uses it on subsequent queries.
-
-Installation
-------------
-
-To install pg8000 using `pip <https://pypi.python.org/pypi/pip>`_ type:
-
-``pip install pg8000``
-
-
-Interactive Example
--------------------
-
-Import pg8000, connect to the database, create a table, add some rows and then
-query the table:
-
-.. code-block:: python
-
-    >>> import pg8000
-    >>> conn = pg8000.connect(user="postgres", password="C.P.Snow")
-    >>> cursor = conn.cursor()
-    >>> cursor.execute("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
-    >>> cursor.execute(
-    ...     "INSERT INTO book (title) VALUES (%s), (%s) RETURNING id, title",
-    ...     ("Ender's Game", "Speaker for the Dead"))
-    >>> results = cursor.fetchall()
-    >>> for row in results:
-    ...     id, title = row
-    ...     print("id = %s, title = %s" % (id, title))
-    id = 1, title = Ender's Game
-    id = 2, title = Speaker for the Dead
-    >>> conn.commit()
-
-Another query, using some PostgreSQL functions:
-
-.. code-block:: python
-
-    >>> cursor.execute("SELECT extract(millennium from now())")
-    >>> cursor.fetchone()
-    [3.0]
-
-A query that returns the PostgreSQL interval type:
-
-.. code-block:: python
-
-    >>> import datetime
-    >>> cursor.execute("SELECT timestamp '2013-12-01 16:06' - %s",
-    ... (datetime.date(1980, 4, 27),))
-    >>> cursor.fetchone()
-    [datetime.timedelta(12271, 57960)]
-
-pg8000 supports all the DB-API parameter styles. Here's an example of using
-the 'numeric' parameter style:
-
-.. code-block:: python
-
-    >>> pg8000.paramstyle = "numeric"
-    >>> cursor.execute("SELECT array_prepend(:1, :2)", ( 500, [1, 2, 3, 4], ))
-    >>> cursor.fetchone()
-    [[500, 1, 2, 3, 4]]
-    >>> pg8000.paramstyle = "format"
-    >>> conn.rollback()
-
-Following the DB-API specification, autocommit is off by default. It can be
-turned on by using the autocommit property of the connection.
-
-.. code-block:: python
-
-    >>> conn.autocommit = True
-    >>> cur = conn.cursor()
-    >>> cur.execute("vacuum")
-    >>> conn.autocommit = False
-    >>> cursor.close()
-
-When communicating with the server, pg8000 uses the character set that the
-server asks it to use (the client encoding). By default the client encoding is
-the database's character set (chosen when the database is created), but the
-client encoding can be changed in a number of ways (eg. setting
-CLIENT_ENCODING in postgresql.conf). Another way of changing the client
-encoding is by using an SQL command. For example:
-
-.. code-block:: python
-
-    >>> cur = conn.cursor()
-    >>> cur.execute("SET CLIENT_ENCODING TO 'UTF8'")
-    >>> cur.execute("SHOW CLIENT_ENCODING")
-    >>> cur.fetchone()
-    ['UTF8']
-    >>> cur.close()
-
-JSON is sent to the server serialized, and returned de-serialized. Here's an
-example:
-
-.. code-block:: python
-
-    >>> import json
-    >>> cur = conn.cursor()
-    >>> val = ['Apollo 11 Cave', True, 26.003]
-    >>> cur.execute("SELECT cast(%s as json)", (json.dumps(val),))
-    >>> cur.fetchone()
-    [['Apollo 11 Cave', True, 26.003]]
-    >>> cur.close()
-    >>> conn.close()
diff -pruN 1.10.6-3/doc/release_notes.rst 1.23.0-0ubuntu1/doc/release_notes.rst
--- 1.10.6-3/doc/release_notes.rst	2016-06-10 10:31:11.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/release_notes.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,581 +0,0 @@
-Release Notes
-=============
-
-Version 1.10.6, 2016-06-10
---------------------------
-- Fixed a problem where we weren't handling the password connection parameter
-  correctly. Now it's handled in the same way as the 'user' and 'database'
-  parameters, ie. if the password is bytes, then pass it straight through to the
-  database, if it's a string then encode it with utf8.
-
-- It used to be that if the 'user' parameter to the connection function was
-  'None', then pg8000 would try and look at environment variables to find a
-  username. Now we just go by the 'user' parameter only, and give an error if
-  it's None.
-
-
-Version 1.10.5, 2016-03-04
---------------------------
-- Include LICENCE text and sources for docs in the source distribution (the
-  tarball).
-
-
-Version 1.10.4, 2016-02-27
---------------------------
-- Fixed bug where if a str is sent as a query parameter, and then with the same
-  cursor an int is sent instead of a string, for the same query, then it fails.
-
-- Under Python 2, a str type is now sent 'as is', ie. as a byte string rather
-  than trying to decode and send according to the client encoding. Under Python
-  2 it's recommended to send text as unicode() objects.
-
-- Dropped and added support for Python versions. Now pg8000 supports
-  Python 2.7+ and Python 3.3+. 
-
-- Dropped and added support for PostgreSQL versions. Now pg8000 supports
-  PostgreSQL 9.1+.
-
-- pg8000 uses the 'six' library for making the same code run on both Python 2
-  and Python 3. We used to include it as a file in the pg8000 source code. Now
-  we have it as a separate dependency that's installed with 'pip install'. The
-  reason for doing this is that package maintainers for OS distributions
-  prefer unbundled libaries.
-
-
-Version 1.10.3, 2016-01-07
---------------------------
-- Removed testing for PostgreSQL 9.0 as it's not longer supported by the
-  PostgreSQL Global Development Group.
-- Fixed bug where pg8000 would fail with datetimes if PostgreSQL was compiled
-  with the integer_datetimes option set to 'off'. The bug was in the
-  timestamp_send_float function.
-
-
-Version 1.10.2, 2015-03-17
---------------------------
-- If there's a socket exception thrown when communicating with the database,
-  it is now wrapped in an OperationalError exception, to conform to the DB-API
-  spec.
-
-- Previously, pg8000 didn't recognize the EmptyQueryResponse (that the server
-  sends back if the SQL query is an empty string) now we raise a
-  ProgrammingError exception.
-
-- Added socket timeout option for Python 3.
-
-- If the server returns an error, we used to initialize the ProgramerException
-  with just the first three fields of the error. Now we initialize the
-  ProgrammerException with all the fields.
-
-- Use relative imports inside package.
-
-- User and database names given as bytes. The user and database parameters of
-  the connect() function are now passed directly as bytes to the server. If the
-  type of the parameter is unicode, pg8000 converts it to bytes using the uft8
-  encoding.
-
-- Added support for JSON and JSONB Postgres types. We take the approach of
-  taking serialized JSON (str) as an SQL parameter, but returning results as
-  de-serialized JSON (Python objects). See the example in the Quickstart.
-
-- Added CircleCI continuous integration.
-
-- String support in arrays now allow letters like "u", braces and whitespace.
-
-
-Version 1.10.1, 2014-09-15
---------------------------
-- Add support for the Wheel package format.
-
-- Remove option to set a connection timeout. For communicating with the server,
-  pg8000 uses a file-like object using socket.makefile() but you can't use this
-  if the underlying socket has a timeout.
-
-
-Version 1.10.0, 2014-08-30
---------------------------
-- Remove the old ``pg8000.dbapi`` and ``pg8000.DBAPI`` namespaces. For example,
-  now only ``pg8000.connect()`` will work, and ``pg8000.dbapi.connect()``
-  won't work any more.
-
-- Parse server version string with LooseVersion. This should solve the problems
-  that people have been having when using versions of PostgreSQL such as
-  ``9.4beta2``.
-
-- Message if portal suspended in autocommit. Give a proper error message if the
-  portal is suspended while in autocommit mode. The error is that the portal is
-  closed when the transaction is closed, and so in autocommit mode the portal
-  will be immediately closed. The bottom line is, don't use autocommit mode if
-  there's a chance of retrieving more rows than the cache holds (currently 100).
-
-
-Version 1.9.14, 2014-08-02
---------------------------
-
-- Make ``executemany()`` set ``rowcount``. Previously, ``executemany()`` would
-  always set ``rowcount`` to -1. Now we set it to a meaningful value if
-  possible. If any of the statements have a -1 ``rowcount`` then then the
-  ``rowcount`` for the ``executemany()`` is -1, otherwise the ``executemany()``
-  ``rowcount`` is the sum of the rowcounts of the individual statements.
-
-- Support for password authentication. pg8000 didn't support plain text
-  authentication, now it does.
-
-
-Version 1.9.13, 2014-07-27
---------------------------
-
-- Reverted to using the string ``connection is closed`` as the message of the
-  exception that's thrown if a connection is closed. For a few versions we were
-  using a slightly different one with capitalization and punctuation, but we've
-  reverted to the original because it's easier for users of the library to
-  consume.
-
-- Previously, ``tpc_recover()`` would start a transaction if one was not already
-  in progress. Now it won't.
-
-
-Version 1.9.12, 2014-07-22
---------------------------
-
-- Fixed bug in ``tpc_commit()`` where a single phase commit failed.
-
-
-Version 1.9.11, 2014-07-20
---------------------------
-
-- Add support for two-phase commit DBAPI extension. Thanks to Mariano Reingart's
-  TPC code on the Google Code version:
-
-  https://code.google.com/p/pg8000/source/detail?r=c8609701b348b1812c418e2c7
-
-  on which the code for this commit is based.
-
-- Deprecate ``copy_from()`` and ``copy_to()`` The methods ``copy_from()`` and
-  ``copy_to()`` of the ``Cursor`` object are deprecated because it's simpler and
-  more flexible to use the ``execute()`` method with a ``fileobj`` parameter.
-
-- Fixed bug in reporting unsupported authentication codes. Thanks to
-  https://github.com/hackgnar for reporting this and providing the fix.
-
-- Have a default for the ``user`` paramater of the ``connect()`` function. If
-  the ``user`` parameter of the ``connect()`` function isn't provided, look
-  first for the ``PGUSER`` then the ``USER`` environment variables. Thanks to
-  Alex Gaynor https://github.com/alex for this suggestion.
-
-- Before PostgreSQL 8.2, ``COPY`` didn't give row count. Until PostgreSQL 8.2
-  (which includes Amazon Redshift which forked at 8.0) the ``COPY`` command
-  didn't return a row count, but pg8000 thought it did. That's fixed now.
-
-
-Version 1.9.10, 2014-06-08
---------------------------
-- Remember prepared statements. Now prepared statements are never closed, and
-  pg8000 remembers which ones are on the server, and uses them when a query is
-  repeated. This gives an increase in performance, because on subsequent
-  queries the prepared statement doesn't need to be created each time.
-
-- For performance reasons, pg8000 never closed portals explicitly, it just
-  let the server close them at the end of the transaction. However, this can
-  cause memory problems for long running transactions, so now pg800 always
-  closes a portal after it's exhausted.
-
-- Fixed bug where unicode arrays failed under Python 2. Thanks to
-  https://github.com/jdkx for reporting this.
-
-- A FLUSH message is now sent after every message (except SYNC). This is in
-  accordance with the protocol docs, and ensures the server sends back its
-  responses straight away.
-
-
-Version 1.9.9, 2014-05-12
--------------------------
-- The PostgreSQL interval type is now mapped to datetime.timedelta where
-  possible. Previously the PostgreSQL interval type was always mapped to the
-  pg8000.Interval type. However, to support the datetime.timedelta type we
-  now use it whenever possible. Unfortunately it's not always possible because
-  timedelta doesn't support months. If months are needed then the fall-back
-  is the pg8000.Interval type. This approach means we handle timedelta in a
-  similar way to other Python PostgreSQL drivers, and it makes pg8000
-  compatible with popular ORMs like SQLAlchemy.
-
-* Fixed bug in executemany() where a new prepared statement should be created
-  for each variation in the oids of the parameter sets.
-
-
-Version 1.9.8, 2014-05-05
--------------------------
-- We used to ask the server for a description of the statement, and then ask
-  for a description of each subsequent portal. We now only ask for a
-  description of the statement. This results in a significant performance
-  improvement, especially for executemany() calls and when using the
-  'use_cache' option of the connect() function.
-
-- Fixed warning in Python 3.4 which was saying that a socket hadn't been
-  closed. It seems that closing a socket file doesn't close the underlying
-  socket.
-
-- Now should cope with PostgreSQL 8 versions before 8.4. This includes Amazon
-  Redshift.
-
-- Added 'unicode' alias for 'utf-8', which is needed for Amazon Redshift.
-
-- Various other bug fixes.
-
-
-Version 1.9.7, 2014-03-26
--------------------------
-- Caching of prepared statements. There's now a 'use_cache' boolean parameter
-  for the connect() function, which causes all prepared statements to be cached
-  by pg8000, keyed on the SQL query string. This should speed things up
-  significantly in most cases.
-
-- Added support for the PostgreSQL inet type. It maps to the Python types
-  IPv*Address and IPv*Network.
-
-- Added support for PostgreSQL +/- infinity date and timestamp values. Now the
-  Python value datetime.datetime.max maps to the PostgreSQL value 'infinity'
-  and datetime.datetime.min maps to '-infinity', and the same for
-  datetime.date.
-
-- Added support for the PostgreSQL types int2vector and xid, which are mostly
-  used internally by PostgreSQL.
-
-
-Version 1.9.6, 2014-02-26
--------------------------
-- Fixed a bug where 'portal does not exist' errors were being generated. Some
-  queries that should have been run in a transaction were run in autocommit
-  mode and so any that suspended a portal had the portal immediately closed,
-  because a portal can only exist within a transaction. This has been solved by
-  determining the transaction status from the READY_FOR_QUERY message.
-
-
-Version 1.9.5, 2014-02-15
--------------------------
-- Removed warn() calls for __next__() and __iter__(). Removing the warn() in
-  __next__() improves the performance tests by ~20%.
-
-- Increased performance of timestamp by ~20%. Should also improve timestamptz.
-
-- Moved statement_number and portal_number from module to Connection. This
-  should reduce lock contention for cases where there's a single module and
-  lots of connections.
-
-- Make decimal_out/in and time_in use client_encoding. These functions used to
-  assume ascii, and I can't think of a case where that wouldn't work.
-  Nonetheless, that theoretical bug is now fixed.
-
-- Fixed a bug in cursor.executemany(), where a non-None parameter in a sequence
-  of parameters, is None in a subsequent sequence of parameters.
-
-
-Version 1.9.4, 2014-01-18
--------------------------
-- Fixed a bug where with Python 2, a parameter with the value Decimal('12.44'),
-  (and probably other numbers) isn't sent correctly to PostgreSQL, and so the
-  command fails. This has been fixed by sending decimal types as text rather
-  than binary. I'd imagine it's slightly faster too.
-
-
-Version 1.9.3, 2014-01-16
--------------------------
-- Fixed bug where there were missing trailing zeros after the decimal point in
-  the NUMERIC type. For example, the NUMERIC value 1.0 was returned as 1 (with
-  no zero after the decimal point).
-
-  This is fixed this by making pg8000 use the text rather than binary
-  representation for the numeric type. This actually doubles the speed of
-  numeric queries.
-
-
-Version 1.9.2, 2013-12-17
--------------------------
-- Fixed incompatibility with PostgreSQL 8.4. In 8.4, the CommandComplete
-  message doesn't return a row count if the command is SELECT. We now look at
-  the server version and don't look for a row count for a SELECT with version
-  8.4.
-
-
-Version 1.9.1, 2013-12-15
--------------------------
-- Fixed bug where the Python 2 'unicode' type wasn't recognized in a query
-  parameter.
-
-
-Version 1.9.0, 2013-12-01
--------------------------
-- For Python 3, the :class:`bytes` type replaces the :class:`pg8000.Bytea`
-  type. For backward compatibility the :class:`pg8000.Bytea` still works under
-  Python 3, but its use is deprecated.
-
-- A single codebase for Python 2 and 3.
-
-- Everything (functions, properties, classes) is now available under the
-  ``pg8000`` namespace. So for example:
-
-  - pg8000.DBAPI.connect() -> pg8000.connect()
-  - pg8000.DBAPI.apilevel -> pg8000.apilevel
-  - pg8000.DBAPI.threadsafety -> pg8000.threadsafety
-  - pg8000.DBAPI.paramstyle -> pg8000.paramstyle
-  - pg8000.types.Bytea -> pg8000.Bytea
-  - pg8000.types.Interval -> pg8000.Interval
-  - pg8000.errors.Warning -> pg8000.Warning
-  - pg8000.errors.Error -> pg8000.Error
-  - pg8000.errors.InterfaceError -> pg8000.InterfaceError
-  - pg8000.errors.DatabaseError -> pg8000.DatabaseError
-
-  The old locations are deprecated, but still work for backward compatibility.
-
-- Lots of performance improvements.
-
-  - Faster receiving of ``numeric`` types.
-  - Query only parsed when PreparedStatement is created.
-  - PreparedStatement re-used in executemany()
-  - Use ``collections.deque`` rather than ``list`` for the row cache. We're
-    adding to one end and removing from the other. This is O(n) for a list but
-    O(1) for a deque.
-  - Find the conversion function and do the format code check in the
-    ROW_DESCRIPTION handler, rather than every time in the ROW_DATA handler.
-  - Use the 'unpack_from' form of struct, when unpacking the data row, so we
-    don't have to slice the data.
-  - Return row as a list for better performance. At the moment result rows are
-    turned into a tuple before being returned. Returning the rows directly as a
-    list speeds up the performance tests about 5%.
-  - Simplify the event loop. Now the main event loop just continues until a
-    READY_FOR_QUERY message is received. This follows the suggestion in the
-    Postgres protocol docs. There's not much of a difference in speed, but the
-    code is a bit simpler, and it should make things more robust.
-  - Re-arrange the code as a state machine to give > 30% speedup.
-  - Using pre-compiled struct objects. Pre-compiled struct objects are a bit
-    faster than using the struct functions directly. It also hopefully adds to
-    the readability of the code.
-  - Speeded up _send. Before calling the socket 'write' method, we were
-    checking that the 'data' type implements the 'buffer' interface (bytes or
-    bytearray), but the check isn't needed because 'write' raises an exception
-    if data is of the wrong type.
-
-
-- Add facility for turning auto-commit on. This follows the suggestion of
-  funkybob to fix the problem of not be able to execute a command such as
-  'create database' that must be executed outside a transaction. Now you can do
-  conn.autocommit = True and then execute 'create database'.
-
-- Add support for the PostgreSQL ``uid`` type. Thanks to Rad Cirskis.
-
-- Add support for the PostgreSQL XML type.
-
-- Add support for the PostgreSQL ``enum`` user defined types.
-
-- Fix a socket leak, where a problem opening a connection could leave a socket
-  open.
-
-- Fix empty array issue. https://github.com/mfenniak/pg8000/issues/10
-
-- Fix scale on ``numeric`` types. https://github.com/mfenniak/pg8000/pull/13
-
-- Fix numeric_send. Thanks to Christian Hofstaedtler.
-
-
-Version 1.08, 2010-06-08
-------------------------
-
-- Removed usage of deprecated :mod:`md5` module, replaced with :mod:`hashlib`.
-  Thanks to Gavin Sherry for the patch.
-
-- Start transactions on execute or executemany, rather than immediately at the
-  end of previous transaction.  Thanks to Ben Moran for the patch.
-
-- Add encoding lookups where needed, to address usage of SQL_ASCII encoding.
-  Thanks to Benjamin Schweizer for the patch.
-
-- Remove record type cache SQL query on every new pg8000 connection.
-
-- Fix and test SSL connections.
-
-- Handle out-of-band messages during authentication.
-
-
-Version 1.07, 2009-01-06
-------------------------
-
-- Added support for :meth:`~pg8000.dbapi.CursorWrapper.copy_to` and
-  :meth:`~pg8000.dbapi.CursorWrapper.copy_from` methods on cursor objects, to
-  allow the usage of the PostgreSQL COPY queries.  Thanks to Bob Ippolito for
-  the original patch.
-
-- Added the :attr:`~pg8000.dbapi.ConnectionWrapper.notifies` and
-  :attr:`~pg8000.dbapi.ConnectionWrapper.notifies_lock` attributes to DBAPI
-  connection objects to provide access to server-side event notifications.
-  Thanks again to Bob Ippolito for the original patch.
-
-- Improved performance using buffered socket I/O.
-
-- Added valid range checks for :class:`~pg8000.types.Interval` attributes.
-
-- Added binary transmission of :class:`~decimal.Decimal` values.  This permits
-  full support for NUMERIC[] types, both send and receive.
-
-- New `Sphinx <http://sphinx.pocoo.org/>`_-based website and documentation.
-
-
-Version 1.06, 2008-12-09
-------------------------
-
-- pg8000-py3: a branch of pg8000 fully supporting Python 3.0.
-
-- New Sphinx-based documentation.
-
-- Support for PostgreSQL array types -- INT2[], INT4[], INT8[], FLOAT[],
-  DOUBLE[], BOOL[], and TEXT[].  New support permits both sending and
-  receiving these values.
-
-- Limited support for receiving RECORD types.  If a record type is received,
-  it will be translated into a Python dict object.
-
-- Fixed potential threading bug where the socket lock could be lost during
-  error handling.
-
-
-Version 1.05, 2008-09-03
-------------------------
-
-- Proper support for timestamptz field type:
-
-  - Reading a timestamptz field results in a datetime.datetime instance that
-    has a valid tzinfo property.  tzinfo is always UTC.
-
-  - Sending a datetime.datetime instance with a tzinfo value will be
-    sent as a timestamptz type, with the appropriate tz conversions done.
-
-- Map postgres < -- > python text encodings correctly.
-
-- Fix bug where underscores were not permitted in pyformat names.
-
-- Support "%s" in a pyformat strin.
-
-- Add cursor.connection DB-API extension.
-
-- Add cursor.next and cursor.__iter__ DB-API extensions.
-
-- DBAPI documentation improvements.
-
-- Don't attempt rollback in cursor.execute if a ConnectionClosedError occurs.
-
-- Add warning for accessing exceptions as attributes on the connection object,
-  as per DB-API spec.
-
-- Fix up open connection when an unexpected connection occurs, rather than
-  leaving the connection in an unusable state.
-
-- Use setuptools/egg package format.
-
-
-Version 1.04, 2008-05-12
-------------------------
-
-- DBAPI 2.0 compatibility:
-
-  - rowcount returns rows affected when appropriate (eg. UPDATE, DELETE)
-
-  - Fix CursorWrapper.description to return a 7 element tuple, as per spec.
-
-  - Fix CursorWrapper.rowcount when using executemany.
-
-  - Fix CursorWrapper.fetchmany to return an empty sequence when no more
-    results are available.
-
-  - Add access to DBAPI exceptions through connection properties.
-
-  - Raise exception on closing a closed connection.
-
-  - Change DBAPI.STRING to varchar type.
-
-  - rowcount returns -1 when appropriate.
-
-  - DBAPI implementation now passes Stuart Bishop's Python DB API 2.0 Anal
-    Compliance Unit Test.
-
-- Make interface.Cursor class use unnamed prepared statement that binds to
-  parameter value types.  This change increases the accuracy of PG's query
-  plans by including parameter information, hence increasing performance in
-  some scenarios.
-
-- Raise exception when reading from a cursor without a result set.
-
-- Fix bug where a parse error may have rendered a connection unusable.
-
-
-Version 1.03, 2008-05-09
-------------------------
-
-- Separate pg8000.py into multiple python modules within the pg8000 package.
-  There should be no need for a client to change how pg8000 is imported.
-
-- Fix bug in row_description property when query has not been completed.
-
-- Fix bug in fetchmany dbapi method that did not properly deal with the end of
-  result sets.
-
-- Add close methods to DB connections.
-
-- Add callback event handlers for server notices, notifications, and runtime
-  configuration changes.
-
-- Add boolean type output.
-
-- Add date, time, and timestamp types in/out.
-
-- Add recognition of "SQL_ASCII" client encoding, which maps to Python's
-  "ascii" encoding.
-
-- Add types.Interval class to represent PostgreSQL's interval data type, and
-  appropriate wire send/receive methods.
-
-- Remove unused type conversion methods.
-
-
-Version 1.02, 2007-03-13
-------------------------
-
-- Add complete DB-API 2.0 interface.
-
-- Add basic SSL support via ssl connect bool.
-
-- Rewrite pg8000_test.py to use Python's unittest library.
-
-- Add bytea type support.
-
-- Add support for parameter output types: NULL value, timestamp value, python
-  long value.
-
-- Add support for input parameter type oid.
-
-
-Version 1.01, 2007-03-09
-------------------------
-
-- Add support for writing floats and decimal objs up to PG backend.
-
-- Add new error handling code and tests to make sure connection can recover
-  from a database error.
-
-- Fixed bug where timestamp types were not always returned in the same binary
-  format from the PG backend.  Text format is now being used to send
-  timestamps.
-
-- Fixed bug where large packets from the server were not being read fully, due
-  to socket.read not always returning full read size requested.  It was a
-  lazy-coding bug.
-
-- Added locks to make most of the library thread-safe.
-
-- Added UNIX socket support.
-
-
-Version 1.00, 2007-03-08
-------------------------
-
-- First public release.  Although fully functional, this release is mostly
-  lacking in production testing and in type support.
-
diff -pruN 1.10.6-3/doc/types.rst 1.23.0-0ubuntu1/doc/types.rst
--- 1.10.6-3/doc/types.rst	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/doc/types.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,89 +0,0 @@
-Type Mapping
-============
-
-The following table shows the mapping between Python types and PostgreSQL
-types, and vice versa.
-
-If pg8000 doesn't recognize a type that it receives from PostgreSQL, it will
-return it as a ``str`` type. This is how pg8000 handles PostgreSQL ``enum`` and
-XML types.
-
-+--------------------------------+-----------------+---------------------------+
-| Python Type                    | PostgreSQL Type | Notes                     |
-+================================+=================+===========================+
-| :class:`bool`                  | bool            |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`int`                   | int4            |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`long`                  | numeric         | Python 2 only.            |
-+--------------------------------+-----------------+---------------------------+
-| :class:`str`                   | text (Python 3) |                           |
-|                                | bytea (Python 2)|                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`unicode`               | text            | Python 2 only.            |
-+--------------------------------+-----------------+---------------------------+
-| :class:`float`                 | float8          |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`decimal.Decimal`       | numeric         |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`pg8000.Bytea`          | bytea           | Python 2 only.            |
-+--------------------------------+-----------------+---------------------------+
-| :class:`bytes`                 | bytea           | Python 3 only.            |
-+--------------------------------+-----------------+---------------------------+
-| :class:`datetime.datetime`     | timestamp       | ``datetime.datetime.max`` |
-| (wo/ tzinfo)                   | without time    | maps to ``infinity``, and |
-|                                | zone            | ``datetime.datetime.min`` |
-|                                |                 | maps to ``-infinity``.    |
-+--------------------------------+-----------------+---------------------------+
-| :class:`datetime.datetime`     | timestamp with  | ``datetime.datetime.max`` |
-| (w/ tzinfo)                    | time zone       | maps to ``infinity``, and |
-|                                |                 | ``datetime.datetime.min`` |
-|                                |                 | maps to ``-infinity``.    |
-|                                |                 | The max and min datetimes |
-|                                |                 | have a UTC timezone.      |
-+--------------------------------+-----------------+---------------------------+
-| :class:`datetime.date`         | date            | ``datetime.date.max``     |
-|                                |                 | maps to ``infinity``, and |
-|                                |                 | ``datetime.date.min``     |
-|                                |                 | maps to ``-infinity``.    |
-+--------------------------------+-----------------+---------------------------+
-| :class:`datetime.time`         | time without    |                           |
-|                                | time zone       |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`datetime.timedelta`    | interval        | datetime.timedelta is     |
-| :class:`pg8000.Interval`       |                 | used unless the interval  |
-|                                |                 | has months, in which case |
-|                                |                 | pg8000.Interval is used   |
-+--------------------------------+-----------------+---------------------------+
-| None                           | NULL            |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`uuid.UUID`             | uuid            |                           |
-+--------------------------------+-----------------+---------------------------+
-| :class:`ipaddress.IPv4Address` | inet            | Python 3.3 onwards        |
-+--------------------------------+-----------------+---------------------------+
-| :class:`ipaddress.IPv6Address` | inet            | Python 3.3 onwards        |
-+--------------------------------+-----------------+---------------------------+
-| :class:`ipaddress.IPv4Network` | inet            | Python 3.3 onwards        |
-+--------------------------------+-----------------+---------------------------+
-| :class:`ipaddress.IPv6Network` | inet            | Python 3.3 onwards        |
-+--------------------------------+-----------------+---------------------------+
-| :class:`int`                   | xid             |                           |
-+--------------------------------+-----------------+---------------------------+
-| list of :class:`int`           | INT4[]          |                           |
-+--------------------------------+-----------------+---------------------------+
-| list of :class:`float`         | FLOAT8[]        |                           |
-+--------------------------------+-----------------+---------------------------+
-| list of :class:`bool`          | BOOL[]          |                           |
-+--------------------------------+-----------------+---------------------------+
-| list of :class:`str`           | TEXT[]          |                           |
-+--------------------------------+-----------------+---------------------------+
-| list of :class:`unicode`       | TEXT[]          | Python 2 only.            |
-+--------------------------------+-----------------+---------------------------+
-| list of :class:`int`           | int2vector      | Only from PostgreSQL to   |
-|                                |                 | Python                    |
-+--------------------------------+-----------------+---------------------------+
-| JSON                           | json, jsonb     | JSON string as an SQL     |
-|                                |                 | parameter. Results        |
-|                                |                 | returned as de-serialized |
-|                                |                 | JSON.                     |
-+--------------------------------+-----------------+---------------------------+
diff -pruN 1.10.6-3/MANIFEST.in 1.23.0-0ubuntu1/MANIFEST.in
--- 1.10.6-3/MANIFEST.in	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/MANIFEST.in	2021-07-30 12:10:36.000000000 +0000
@@ -1,5 +1,6 @@
-include README.creole
+include README.adoc
 include versioneer.py
 include pg8000/_version.py
 include LICENSE
-include doc/*
+graft test
+global-exclude *.py[cod]
diff -pruN 1.10.6-3/pg8000/converters.py 1.23.0-0ubuntu1/pg8000/converters.py
--- 1.10.6-3/pg8000/converters.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/converters.py	2021-11-10 18:48:29.000000000 +0000
@@ -0,0 +1,688 @@
+from datetime import (
+    date as Date,
+    datetime as Datetime,
+    time as Time,
+    timedelta as Timedelta,
+    timezone as Timezone,
+)
+from decimal import Decimal
+from enum import Enum
+from ipaddress import (
+    IPv4Address,
+    IPv4Network,
+    IPv6Address,
+    IPv6Network,
+    ip_address,
+    ip_network,
+)
+from json import dumps, loads
+from uuid import UUID
+
+from pg8000.exceptions import InterfaceError
+
+
+ANY_ARRAY = 2277
+BIGINT = 20
+BIGINT_ARRAY = 1016
+BOOLEAN = 16
+BOOLEAN_ARRAY = 1000
+BYTES = 17
+BYTES_ARRAY = 1001
+CHAR = 1042
+CHAR_ARRAY = 1014
+CIDR = 650
+CIDR_ARRAY = 651
+CSTRING = 2275
+CSTRING_ARRAY = 1263
+DATE = 1082
+DATE_ARRAY = 1182
+FLOAT = 701
+FLOAT_ARRAY = 1022
+INET = 869
+INET_ARRAY = 1041
+INT2VECTOR = 22
+INTEGER = 23
+INTEGER_ARRAY = 1007
+INTERVAL = 1186
+INTERVAL_ARRAY = 1187
+OID = 26
+JSON = 114
+JSON_ARRAY = 199
+JSONB = 3802
+JSONB_ARRAY = 3807
+MACADDR = 829
+MONEY = 790
+MONEY_ARRAY = 791
+NAME = 19
+NAME_ARRAY = 1003
+NUMERIC = 1700
+NUMERIC_ARRAY = 1231
+NULLTYPE = -1
+OID = 26
+POINT = 600
+REAL = 700
+REAL_ARRAY = 1021
+SMALLINT = 21
+SMALLINT_ARRAY = 1005
+SMALLINT_VECTOR = 22
+STRING = 1043
+TEXT = 25
+TEXT_ARRAY = 1009
+TIME = 1083
+TIME_ARRAY = 1183
+TIMESTAMP = 1114
+TIMESTAMP_ARRAY = 1115
+TIMESTAMPTZ = 1184
+TIMESTAMPTZ_ARRAY = 1185
+UNKNOWN = 705
+UUID_TYPE = 2950
+UUID_ARRAY = 2951
+VARCHAR = 1043
+VARCHAR_ARRAY = 1015
+XID = 28
+
+
+MIN_INT2, MAX_INT2 = -(2 ** 15), 2 ** 15
+MIN_INT4, MAX_INT4 = -(2 ** 31), 2 ** 31
+MIN_INT8, MAX_INT8 = -(2 ** 63), 2 ** 63
+
+
+def bool_in(data):
+    return data == "t"
+
+
+def bool_out(v):
+    return "true" if v else "false"
+
+
+def bytes_in(data):
+    return bytes.fromhex(data[2:])
+
+
+def bytes_out(v):
+    return "\\x" + v.hex()
+
+
+def cidr_out(v):
+    return str(v)
+
+
+def cidr_in(data):
+    return ip_network(data, False) if "/" in data else ip_address(data)
+
+
+def date_in(data):
+    return Datetime.strptime(data, "%Y-%m-%d").date()
+
+
+def date_out(v):
+    return v.isoformat()
+
+
+def datetime_out(v):
+    if v.tzinfo is None:
+        return v.isoformat()
+    else:
+        return v.astimezone(Timezone.utc).isoformat()
+
+
+def enum_out(v):
+    return str(v.value)
+
+
+def float_out(v):
+    return str(v)
+
+
+def inet_in(data):
+    return ip_network(data, False) if "/" in data else ip_address(data)
+
+
+def inet_out(v):
+    return str(v)
+
+
+def int_in(data):
+    return int(data)
+
+
+def int_out(v):
+    return str(v)
+
+
+def interval_in(data):
+    t = {}
+
+    curr_val = None
+    for k in data.split():
+        if ":" in k:
+            t["hours"], t["minutes"], t["seconds"] = map(float, k.split(":"))
+        else:
+            try:
+                curr_val = float(k)
+            except ValueError:
+                t[PGInterval.UNIT_MAP[k]] = curr_val
+
+    for n in ["weeks", "months", "years", "decades", "centuries", "millennia"]:
+        if n in t:
+            raise InterfaceError(
+                f"Can't fit the interval {t} into a datetime.timedelta."
+            )
+
+    return Timedelta(**t)
+
+
+def interval_out(v):
+    return f"{v.days} days {v.seconds} seconds {v.microseconds} microseconds"
+
+
+def json_in(data):
+    return loads(data)
+
+
+def json_out(v):
+    return dumps(v)
+
+
+def null_out(v):
+    return None
+
+
+def numeric_in(data):
+    return Decimal(data)
+
+
+def numeric_out(d):
+    return str(d)
+
+
+def pg_interval_in(data):
+    return PGInterval.from_str(data)
+
+
+def pg_interval_out(v):
+    return str(v)
+
+
+def string_in(data):
+    return data
+
+
+def string_out(v):
+    return v
+
+
+def time_in(data):
+    pattern = "%H:%M:%S.%f" if "." in data else "%H:%M:%S"
+    return Datetime.strptime(data, pattern).time()
+
+
+def time_out(v):
+    return v.isoformat()
+
+
+def timestamp_in(data):
+    if data in ("infinity", "-infinity"):
+        return data
+
+    pattern = "%Y-%m-%d %H:%M:%S.%f" if "." in data else "%Y-%m-%d %H:%M:%S"
+    return Datetime.strptime(data, pattern)
+
+
+def timestamptz_in(data):
+    patt = "%Y-%m-%d %H:%M:%S.%f%z" if "." in data else "%Y-%m-%d %H:%M:%S%z"
+    return Datetime.strptime(data + "00", patt)
+
+
+def unknown_out(v):
+    return str(v)
+
+
+def vector_in(data):
+    return eval("[" + data.replace(" ", ",") + "]")
+
+
+def uuid_out(v):
+    return str(v)
+
+
+def uuid_in(data):
+    return UUID(data)
+
+
+class PGInterval:
+    UNIT_MAP = {
+        "year": "years",
+        "years": "years",
+        "millennia": "millennia",
+        "millenium": "millennia",
+        "centuries": "centuries",
+        "century": "centuries",
+        "decades": "decades",
+        "decade": "decades",
+        "years": "years",
+        "year": "years",
+        "months": "months",
+        "month": "months",
+        "mon": "months",
+        "mons": "months",
+        "weeks": "weeks",
+        "week": "weeks",
+        "days": "days",
+        "day": "days",
+        "hours": "hours",
+        "hour": "hours",
+        "minutes": "minutes",
+        "minute": "minutes",
+        "seconds": "seconds",
+        "second": "seconds",
+        "microseconds": "microseconds",
+        "microsecond": "microseconds",
+    }
+
+    @staticmethod
+    def from_str(interval_str):
+        t = {}
+
+        curr_val = None
+        for k in interval_str.split():
+            if ":" in k:
+                hours_str, minutes_str, seconds_str = k.split(":")
+                hours = int(hours_str)
+                if hours != 0:
+                    t["hours"] = hours
+                minutes = int(minutes_str)
+                if minutes != 0:
+                    t["minutes"] = minutes
+                try:
+                    seconds = int(seconds_str)
+                except ValueError:
+                    seconds = float(seconds_str)
+
+                if seconds != 0:
+                    t["seconds"] = seconds
+
+            else:
+                try:
+                    curr_val = int(k)
+                except ValueError:
+                    t[PGInterval.UNIT_MAP[k]] = curr_val
+
+        return PGInterval(**t)
+
+    def __init__(
+        self,
+        millennia=None,
+        centuries=None,
+        decades=None,
+        years=None,
+        months=None,
+        weeks=None,
+        days=None,
+        hours=None,
+        minutes=None,
+        seconds=None,
+        microseconds=None,
+    ):
+        self.millennia = millennia
+        self.centuries = centuries
+        self.decades = decades
+        self.years = years
+        self.months = months
+        self.weeks = weeks
+        self.days = days
+        self.hours = hours
+        self.minutes = minutes
+        self.seconds = seconds
+        self.microseconds = microseconds
+
+    def __repr__(self):
+        return f"<PGInterval {self}>"
+
+    def __str__(self):
+        pairs = (
+            ("millennia", self.millennia),
+            ("centuries", self.centuries),
+            ("decades", self.decades),
+            ("years", self.years),
+            ("months", self.months),
+            ("weeks", self.weeks),
+            ("days", self.days),
+            ("hours", self.hours),
+            ("minutes", self.minutes),
+            ("seconds", self.seconds),
+            ("microseconds", self.microseconds),
+        )
+        return " ".join(f"{v} {n}" for n, v in pairs if v is not None)
+
+    def normalize(self):
+        months = 0
+        if self.months is not None:
+            months += self.months
+        if self.years is not None:
+            months += self.years * 12
+
+        days = 0
+        if self.days is not None:
+            days += self.days
+        if self.weeks is not None:
+            days += self.weeks * 7
+
+        seconds = 0
+        if self.hours is not None:
+            seconds += self.hours * 60 * 60
+        if self.minutes is not None:
+            seconds += self.minutes * 60
+        if self.seconds is not None:
+            seconds += self.seconds
+        if self.microseconds is not None:
+            seconds += self.microseconds / 1000000
+
+        return PGInterval(months=months, days=days, seconds=seconds)
+
+    def __eq__(self, other):
+        if isinstance(other, PGInterval):
+            s = self.normalize()
+            o = other.normalize()
+            return s.months == o.months and s.days == o.days and s.seconds == o.seconds
+        else:
+            return False
+
+
+class ArrayState(Enum):
+    InString = 1
+    InEscape = 2
+    InValue = 3
+    Out = 4
+
+
+def _parse_array(data, adapter):
+    state = ArrayState.Out
+    stack = [[]]
+    val = []
+    for c in data:
+        if state == ArrayState.InValue:
+            if c in ("}", ","):
+                value = "".join(val)
+                stack[-1].append(None if value == "NULL" else adapter(value))
+                state = ArrayState.Out
+            else:
+                val.append(c)
+
+        if state == ArrayState.Out:
+            if c == "{":
+                a = []
+                stack[-1].append(a)
+                stack.append(a)
+            elif c == "}":
+                stack.pop()
+            elif c == ",":
+                pass
+            elif c == '"':
+                val = []
+                state = ArrayState.InString
+            else:
+                val = [c]
+                state = ArrayState.InValue
+
+        elif state == ArrayState.InString:
+            if c == '"':
+                stack[-1].append(adapter("".join(val)))
+                state = ArrayState.Out
+            elif c == "\\":
+                state = ArrayState.InEscape
+            else:
+                val.append(c)
+        elif state == ArrayState.InEscape:
+            val.append(c)
+            state = ArrayState.InString
+
+    return stack[0][0]
+
+
+def _array_in(adapter):
+    def f(data):
+        return _parse_array(data, adapter)
+
+    return f
+
+
+bool_array_in = _array_in(bool_in)
+bytes_array_in = _array_in(bytes_in)
+cidr_array_in = _array_in(cidr_in)
+date_array_in = _array_in(date_in)
+inet_array_in = _array_in(inet_in)
+int_array_in = _array_in(int)
+interval_array_in = _array_in(interval_in)
+json_array_in = _array_in(json_in)
+float_array_in = _array_in(float)
+numeric_array_in = _array_in(numeric_in)
+string_array_in = _array_in(string_in)
+time_array_in = _array_in(time_in)
+timestamp_array_in = _array_in(timestamp_in)
+timestamptz_array_in = _array_in(timestamptz_in)
+uuid_array_in = _array_in(uuid_in)
+
+
+def array_string_escape(v):
+    cs = []
+    for c in v:
+        if c == "\\":
+            cs.append("\\")
+        elif c == '"':
+            cs.append("\\")
+        cs.append(c)
+    val = "".join(cs)
+    if (
+        len(val) == 0
+        or val == "NULL"
+        or any([c in val for c in ("{", "}", ",", " ", "\\")])
+    ):
+        val = f'"{val}"'
+    return val
+
+
+def array_out(ar):
+    result = []
+    for v in ar:
+
+        if isinstance(v, (list, tuple)):
+            val = array_out(v)
+
+        elif v is None:
+            val = "NULL"
+
+        elif isinstance(v, dict):
+            val = array_string_escape(json_out(v))
+
+        elif isinstance(v, (bytes, bytearray)):
+            val = f'"\\{bytes_out(v)}"'
+
+        elif isinstance(v, str):
+            val = array_string_escape(v)
+
+        else:
+            val = make_param(PY_TYPES, v)
+
+        result.append(val)
+
+    return "{" + ",".join(result) + "}"
+
+
+PY_PG = {
+    Date: DATE,
+    Decimal: NUMERIC,
+    IPv4Address: INET,
+    IPv6Address: INET,
+    IPv4Network: INET,
+    IPv6Network: INET,
+    PGInterval: INTERVAL,
+    Time: TIME,
+    Timedelta: INTERVAL,
+    UUID: UUID_TYPE,
+    bool: BOOLEAN,
+    bytearray: BYTES,
+    dict: JSONB,
+    float: FLOAT,
+    type(None): NULLTYPE,
+    bytes: BYTES,
+    str: TEXT,
+}
+
+
+PY_TYPES = {
+    Date: date_out,  # date
+    Datetime: datetime_out,
+    Decimal: numeric_out,  # numeric
+    Enum: enum_out,  # enum
+    IPv4Address: inet_out,  # inet
+    IPv6Address: inet_out,  # inet
+    IPv4Network: inet_out,  # inet
+    IPv6Network: inet_out,  # inet
+    PGInterval: interval_out,  # interval
+    Time: time_out,  # time
+    Timedelta: interval_out,  # interval
+    UUID: uuid_out,  # uuid
+    bool: bool_out,  # bool
+    bytearray: bytes_out,  # bytea
+    dict: json_out,  # jsonb
+    float: float_out,  # float8
+    type(None): null_out,  # null
+    bytes: bytes_out,  # bytea
+    str: string_out,  # unknown
+    int: int_out,
+    list: array_out,
+    tuple: array_out,
+}
+
+
+PG_TYPES = {
+    BIGINT: int,  # int8
+    BIGINT_ARRAY: int_array_in,  # int8[]
+    BOOLEAN: bool_in,  # bool
+    BOOLEAN_ARRAY: bool_array_in,  # bool[]
+    BYTES: bytes_in,  # bytea
+    BYTES_ARRAY: bytes_array_in,  # bytea[]
+    CHAR: string_in,  # char
+    CHAR_ARRAY: string_array_in,  # char[]
+    CIDR_ARRAY: cidr_array_in,  # cidr[]
+    CSTRING: string_in,  # cstring
+    CSTRING_ARRAY: string_array_in,  # cstring[]
+    DATE: date_in,  # date
+    DATE_ARRAY: date_array_in,  # date[]
+    FLOAT: float,  # float8
+    FLOAT_ARRAY: float_array_in,  # float8[]
+    INET: inet_in,  # inet
+    INET_ARRAY: inet_array_in,  # inet[]
+    INTEGER: int,  # int4
+    INTEGER_ARRAY: int_array_in,  # int4[]
+    JSON: json_in,  # json
+    JSON_ARRAY: json_array_in,  # json[]
+    JSONB: json_in,  # jsonb
+    JSONB_ARRAY: json_array_in,  # jsonb[]
+    MACADDR: string_in,  # MACADDR type
+    MONEY: string_in,  # money
+    MONEY_ARRAY: string_array_in,  # money[]
+    NAME: string_in,  # name
+    NAME_ARRAY: string_array_in,  # name[]
+    NUMERIC: numeric_in,  # numeric
+    NUMERIC_ARRAY: numeric_array_in,  # numeric[]
+    OID: int,  # oid
+    INTERVAL: interval_in,  # interval
+    INTERVAL_ARRAY: interval_array_in,  # interval[]
+    REAL: float,  # float4
+    REAL_ARRAY: float_array_in,  # float4[]
+    SMALLINT: int,  # int2
+    SMALLINT_ARRAY: int_array_in,  # int2[]
+    SMALLINT_VECTOR: vector_in,  # int2vector
+    TEXT: string_in,  # text
+    TEXT_ARRAY: string_array_in,  # text[]
+    TIME: time_in,  # time
+    TIME_ARRAY: time_array_in,  # time[]
+    INTERVAL: interval_in,  # interval
+    TIMESTAMP: timestamp_in,  # timestamp
+    TIMESTAMP_ARRAY: timestamp_array_in,  # timestamp
+    TIMESTAMPTZ: timestamptz_in,  # timestamptz
+    TIMESTAMPTZ_ARRAY: timestamptz_array_in,  # timestamptz
+    UNKNOWN: string_in,  # unknown
+    UUID_ARRAY: uuid_array_in,  # uuid[]
+    UUID_TYPE: uuid_in,  # uuid
+    VARCHAR: string_in,  # varchar
+    VARCHAR_ARRAY: string_array_in,  # varchar[]
+    XID: int,  # xid
+}
+
+
+# PostgreSQL encodings:
+# https://www.postgresql.org/docs/current/multibyte.html
+#
+# Python encodings:
+# https://docs.python.org/3/library/codecs.html
+#
+# Commented out encodings don't require a name change between PostgreSQL and
+# Python.  If the py side is None, then the encoding isn't supported.
+PG_PY_ENCODINGS = {
+    # Not supported:
+    "mule_internal": None,
+    "euc_tw": None,
+    # Name fine as-is:
+    # "euc_jp",
+    # "euc_jis_2004",
+    # "euc_kr",
+    # "gb18030",
+    # "gbk",
+    # "johab",
+    # "sjis",
+    # "shift_jis_2004",
+    # "uhc",
+    # "utf8",
+    # Different name:
+    "euc_cn": "gb2312",
+    "iso_8859_5": "is8859_5",
+    "iso_8859_6": "is8859_6",
+    "iso_8859_7": "is8859_7",
+    "iso_8859_8": "is8859_8",
+    "koi8": "koi8_r",
+    "latin1": "iso8859-1",
+    "latin2": "iso8859_2",
+    "latin3": "iso8859_3",
+    "latin4": "iso8859_4",
+    "latin5": "iso8859_9",
+    "latin6": "iso8859_10",
+    "latin7": "iso8859_13",
+    "latin8": "iso8859_14",
+    "latin9": "iso8859_15",
+    "sql_ascii": "ascii",
+    "win866": "cp886",
+    "win874": "cp874",
+    "win1250": "cp1250",
+    "win1251": "cp1251",
+    "win1252": "cp1252",
+    "win1253": "cp1253",
+    "win1254": "cp1254",
+    "win1255": "cp1255",
+    "win1256": "cp1256",
+    "win1257": "cp1257",
+    "win1258": "cp1258",
+    "unicode": "utf-8",  # Needed for Amazon Redshift
+}
+
+
+def make_param(py_types, value):
+    try:
+        func = py_types[type(value)]
+    except KeyError:
+        func = str
+        for k, v in py_types.items():
+            try:
+                if isinstance(value, k):
+                    func = v
+                    break
+            except TypeError:
+                pass
+
+    return func(value)
+
+
+def make_params(py_types, values):
+    return tuple([make_param(py_types, v) for v in values])
diff -pruN 1.10.6-3/pg8000/core.py 1.23.0-0ubuntu1/pg8000/core.py
--- 1.10.6-3/pg8000/core.py	2016-06-10 10:24:59.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/core.py	2021-11-13 10:02:41.000000000 +0000
@@ -1,24 +1,40 @@
-import datetime
-from datetime import timedelta
-from warnings import warn
+import codecs
 import socket
-import threading
-from struct import pack
+import struct
+from collections import defaultdict, deque
 from hashlib import md5
-from decimal import Decimal
-from collections import deque, defaultdict
-from itertools import count, islice
-from six.moves import map
-from six import b, PY2, integer_types, next, text_type, u, binary_type
-from uuid import UUID
-from copy import deepcopy
-from calendar import timegm
-from distutils.version import LooseVersion
+from io import TextIOBase
+from itertools import count
 from struct import Struct
-import time
-import pg8000
+
+import scramp
+
+from pg8000.converters import (
+    PG_PY_ENCODINGS,
+    PG_TYPES,
+    PY_TYPES,
+    make_params,
+    string_in,
+)
+from pg8000.exceptions import DatabaseError, InterfaceError
+
+
+def pack_funcs(fmt):
+    struc = Struct(f"!{fmt}")
+    return struc.pack, struc.unpack_from
+
+
+i_pack, i_unpack = pack_funcs("i")
+h_pack, h_unpack = pack_funcs("h")
+ii_pack, ii_unpack = pack_funcs("ii")
+ihihih_pack, ihihih_unpack = pack_funcs("ihihih")
+ci_pack, ci_unpack = pack_funcs("ci")
+bh_pack, bh_unpack = pack_funcs("bh")
+cccc_pack, cccc_unpack = pack_funcs("cccc")
+
 
 # Copyright (c) 2007-2009, Mathieu Fenniak
+# Copyright (c) The Contributors
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -48,1507 +64,236 @@ import pg8000
 __author__ = "Mathieu Fenniak"
 
 
-try:
-    from json import loads
-except ImportError:
-    pass  # Can only use JSON with Python 2.6 and above
-
-
-ZERO = timedelta(0)
-
-
-class UTC(datetime.tzinfo):
-
-    def utcoffset(self, dt):
-        return ZERO
-
-    def tzname(self, dt):
-        return "UTC"
-
-    def dst(self, dt):
-        return ZERO
-
-utc = UTC()
-
-
-class Interval(object):
-    """An Interval represents a measurement of time.  In PostgreSQL, an
-    interval is defined in the measure of months, days, and microseconds; as
-    such, the pg8000 interval type represents the same information.
-
-    Note that values of the :attr:`microseconds`, :attr:`days` and
-    :attr:`months` properties are independently measured and cannot be
-    converted to each other.  A month may be 28, 29, 30, or 31 days, and a day
-    may occasionally be lengthened slightly by a leap second.
-
-    .. attribute:: microseconds
-
-        Measure of microseconds in the interval.
-
-        The microseconds value is constrained to fit into a signed 64-bit
-        integer.  Any attempt to set a value too large or too small will result
-        in an OverflowError being raised.
-
-    .. attribute:: days
-
-        Measure of days in the interval.
-
-        The days value is constrained to fit into a signed 32-bit integer.
-        Any attempt to set a value too large or too small will result in an
-        OverflowError being raised.
-
-    .. attribute:: months
-
-        Measure of months in the interval.
-
-        The months value is constrained to fit into a signed 32-bit integer.
-        Any attempt to set a value too large or too small will result in an
-        OverflowError being raised.
-    """
-
-    def __init__(self, microseconds=0, days=0, months=0):
-        self.microseconds = microseconds
-        self.days = days
-        self.months = months
-
-    def _setMicroseconds(self, value):
-        if not isinstance(value, integer_types):
-            raise TypeError("microseconds must be an integer type")
-        elif not (min_int8 < value < max_int8):
-            raise OverflowError(
-                "microseconds must be representable as a 64-bit integer")
-        else:
-            self._microseconds = value
-
-    def _setDays(self, value):
-        if not isinstance(value, integer_types):
-            raise TypeError("days must be an integer type")
-        elif not (min_int4 < value < max_int4):
-            raise OverflowError(
-                "days must be representable as a 32-bit integer")
-        else:
-            self._days = value
-
-    def _setMonths(self, value):
-        if not isinstance(value, integer_types):
-            raise TypeError("months must be an integer type")
-        elif not (min_int4 < value < max_int4):
-            raise OverflowError(
-                "months must be representable as a 32-bit integer")
-        else:
-            self._months = value
-
-    microseconds = property(lambda self: self._microseconds, _setMicroseconds)
-    days = property(lambda self: self._days, _setDays)
-    months = property(lambda self: self._months, _setMonths)
-
-    def __repr__(self):
-        return "<Interval %s months %s days %s microseconds>" % (
-            self.months, self.days, self.microseconds)
-
-    def __eq__(self, other):
-        return other is not None and isinstance(other, Interval) and \
-            self.months == other.months and self.days == other.days and \
-            self.microseconds == other.microseconds
-
-    def __neq__(self, other):
-        return not self.__eq__(other)
-
-
-def pack_funcs(fmt):
-    struc = Struct('!' + fmt)
-    return struc.pack, struc.unpack_from
-
-i_pack, i_unpack = pack_funcs('i')
-h_pack, h_unpack = pack_funcs('h')
-q_pack, q_unpack = pack_funcs('q')
-d_pack, d_unpack = pack_funcs('d')
-f_pack, f_unpack = pack_funcs('f')
-iii_pack, iii_unpack = pack_funcs('iii')
-ii_pack, ii_unpack = pack_funcs('ii')
-qii_pack, qii_unpack = pack_funcs('qii')
-dii_pack, dii_unpack = pack_funcs('dii')
-ihihih_pack, ihihih_unpack = pack_funcs('ihihih')
-ci_pack, ci_unpack = pack_funcs('ci')
-bh_pack, bh_unpack = pack_funcs('bh')
-cccc_pack, cccc_unpack = pack_funcs('cccc')
-
-
-Struct('!i')
-
-
-min_int2, max_int2 = -2 ** 15, 2 ** 15
-min_int4, max_int4 = -2 ** 31, 2 ** 31
-min_int8, max_int8 = -2 ** 63, 2 ** 63
-
-
-class Warning(Exception):
-    """Generic exception raised for important database warnings like data
-    truncations.  This exception is not currently used by pg8000.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class Error(Exception):
-    """Generic exception that is the base exception of all other error
-    exceptions.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class InterfaceError(Error):
-    """Generic exception raised for errors that are related to the database
-    interface rather than the database itself.  For example, if the interface
-    attempts to use an SSL connection but the server refuses, an InterfaceError
-    will be raised.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class DatabaseError(Error):
-    """Generic exception raised for errors that are related to the database.
-    This exception is currently never raised by pg8000.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class DataError(DatabaseError):
-    """Generic exception raised for errors that are due to problems with the
-    processed data.  This exception is not currently raised by pg8000.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class OperationalError(DatabaseError):
-    """
-    Generic exception raised for errors that are related to the database's
-    operation and not necessarily under the control of the programmer. This
-    exception is currently never raised by pg8000.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class IntegrityError(DatabaseError):
-    """
-    Generic exception raised when the relational integrity of the database is
-    affected.  This exception is not currently raised by pg8000.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class InternalError(DatabaseError):
-    """Generic exception raised when the database encounters an internal error.
-    This is currently only raised when unexpected state occurs in the pg8000
-    interface itself, and is typically the result of a interface bug.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class ProgrammingError(DatabaseError):
-    """Generic exception raised for programming errors.  For example, this
-    exception is raised if more parameter fields are in a query string than
-    there are available parameters.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class NotSupportedError(DatabaseError):
-    """Generic exception raised in case a method or database API was used which
-    is not supported by the database.
-
-    This exception is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-    pass
-
-
-class ArrayContentNotSupportedError(NotSupportedError):
-    """
-    Raised when attempting to transmit an array where the base type is not
-    supported for binary data transfer by the interface.
-    """
-    pass
-
-
-class ArrayContentNotHomogenousError(ProgrammingError):
-    """
-    Raised when attempting to transmit an array that doesn't contain only a
-    single type of object.
-    """
-    pass
-
-
-class ArrayContentEmptyError(ProgrammingError):
-    """Raised when attempting to transmit an empty array. The type oid of an
-    empty array cannot be determined, and so sending them is not permitted.
-    """
-    pass
-
-
-class ArrayDimensionsNotConsistentError(ProgrammingError):
-    """
-    Raised when attempting to transmit an array that has inconsistent
-    multi-dimension sizes.
-    """
-    pass
-
-
-class Bytea(binary_type):
-    """Bytea is a str-derived class that is mapped to a PostgreSQL byte array.
-    This class is only used in Python 2, the built-in ``bytes`` type is used in
-    Python 3.
-    """
-    pass
-
-
-def Date(year, month, day):
-    """Constuct an object holding a date value.
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`datetime.date`
-    """
-    return datetime.date(year, month, day)
-
-
-def Time(hour, minute, second):
-    """Construct an object holding a time value.
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`datetime.time`
-    """
-    return datetime.time(hour, minute, second)
-
-
-def Timestamp(year, month, day, hour, minute, second):
-    """Construct an object holding a timestamp value.
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`datetime.datetime`
-    """
-    return datetime.datetime(year, month, day, hour, minute, second)
-
-
-def DateFromTicks(ticks):
-    """Construct an object holding a date value from the given ticks value
-    (number of seconds since the epoch).
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`datetime.date`
-    """
-    return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
-    """Construct an objet holding a time value from the given ticks value
-    (number of seconds since the epoch).
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`datetime.time`
-    """
-    return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
-    """Construct an object holding a timestamp value from the given ticks value
-    (number of seconds since the epoch).
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`datetime.datetime`
-    """
-    return Timestamp(*time.localtime(ticks)[:6])
-
-
-def Binary(value):
-    """Construct an object holding binary data.
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    :rtype: :class:`pg8000.types.Bytea` for Python 2, otherwise :class:`bytes`
-    """
-    if PY2:
-        return Bytea(value)
-    else:
-        return value
-
-if PY2:
-    BINARY = Bytea
-else:
-    BINARY = bytes
-
-FC_TEXT = 0
-FC_BINARY = 1
-
-BINARY_SPACE = b(" ")
-DDL_COMMANDS = b("ALTER"), b("CREATE")
-
+NULL_BYTE = b"\x00"
 
-def convert_paramstyle(style, query):
-    # I don't see any way to avoid scanning the query string char by char,
-    # so we might as well take that careful approach and create a
-    # state-based scanner.  We'll use int variables for the state.
-    #  0 -- outside quoted string
-    #  1 -- inside single-quote string '...'
-    #  2 -- inside quoted identifier   "..."
-    #  3 -- inside escaped single-quote string, E'...'
-    #  4 -- inside parameter name eg. :name
-    OUTSIDE = 0
-    INSIDE_SQ = 1
-    INSIDE_QI = 2
-    INSIDE_ES = 3
-    INSIDE_PN = 4
-
-    in_quote_escape = False
-    in_param_escape = False
-    placeholders = []
-    output_query = []
-    param_idx = map(lambda x: "$" + str(x), count(1))
-    state = OUTSIDE
-    prev_c = None
-    for i, c in enumerate(query):
-        if i + 1 < len(query):
-            next_c = query[i + 1]
-        else:
-            next_c = None
-
-        if state == OUTSIDE:
-            if c == "'":
-                output_query.append(c)
-                if prev_c == 'E':
-                    state = INSIDE_ES
-                else:
-                    state = INSIDE_SQ
-            elif c == '"':
-                output_query.append(c)
-                state = INSIDE_QI
-            elif style == "qmark" and c == "?":
-                output_query.append(next(param_idx))
-            elif style == "numeric" and c == ":":
-                output_query.append("$")
-            elif style == "named" and c == ":":
-                state = INSIDE_PN
-                placeholders.append('')
-            elif style == "pyformat" and c == '%' and next_c == "(":
-                state = INSIDE_PN
-                placeholders.append('')
-            elif style in ("format", "pyformat") and c == "%":
-                style = "format"
-                if in_param_escape:
-                    in_param_escape = False
-                    output_query.append(c)
-                else:
-                    if next_c == "%":
-                        in_param_escape = True
-                    elif next_c == "s":
-                        state = INSIDE_PN
-                        output_query.append(next(param_idx))
-                    else:
-                        raise InterfaceError(
-                            "Only %s and %% are supported in the query.")
-            else:
-                output_query.append(c)
-
-        elif state == INSIDE_SQ:
-            if c == "'":
-                output_query.append(c)
-                if in_quote_escape:
-                    in_quote_escape = False
-                else:
-                    if next_c == "'":
-                        in_quote_escape = True
-                    else:
-                        state = OUTSIDE
-            elif style in ("pyformat", "format") and c == "%":
-                # hm... we're only going to support an escaped percent sign
-                if in_param_escape:
-                    in_param_escape = False
-                    output_query.append(c)
-                else:
-                    if next_c == "%":
-                        in_param_escape = True
-                    else:
-                        raise InterfaceError(
-                            "'%" + next_c + "' not supported in a quoted "
-                            "string within the query string")
-            else:
-                output_query.append(c)
-
-        elif state == INSIDE_QI:
-            if c == '"':
-                state = OUTSIDE
-                output_query.append(c)
-            elif style in ("pyformat", "format") and c == "%":
-                # hm... we're only going to support an escaped percent sign
-                if in_param_escape:
-                    in_param_escape = False
-                    output_query.append(c)
-                else:
-                    if next_c == "%":
-                        in_param_escape = True
-                    else:
-                        raise InterfaceError(
-                            "'%" + next_c + "' not supported in a quoted "
-                            "string within the query string")
-            else:
-                output_query.append(c)
-
-        elif state == INSIDE_ES:
-            if c == "'" and prev_c != "\\":
-                # check for escaped single-quote
-                output_query.append(c)
-                state = OUTSIDE
-            elif style in ("pyformat", "format") and c == "%":
-                # hm... we're only going to support an escaped percent sign
-                if in_param_escape:
-                    in_param_escape = False
-                    output_query.append(c)
-                else:
-                    if next_c == "%":
-                        in_param_escape = True
-                    else:
-                        raise InterfaceError(
-                            "'%" + next_c + "' not supported in a quoted "
-                            "string within the query string.")
-            else:
-                output_query.append(c)
-
-        elif state == INSIDE_PN:
-            if style == 'named':
-                placeholders[-1] += c
-                if next_c is None or (not next_c.isalnum() and next_c != '_'):
-                    state = OUTSIDE
-                    try:
-                        pidx = placeholders.index(placeholders[-1], 0, -1)
-                        output_query.append("$" + str(pidx + 1))
-                        del placeholders[-1]
-                    except ValueError:
-                        output_query.append("$" + str(len(placeholders)))
-            elif style == 'pyformat':
-                if prev_c == ')' and c == "s":
-                    state = OUTSIDE
-                    try:
-                        pidx = placeholders.index(placeholders[-1], 0, -1)
-                        output_query.append("$" + str(pidx + 1))
-                        del placeholders[-1]
-                    except ValueError:
-                        output_query.append("$" + str(len(placeholders)))
-                elif c in "()":
-                    pass
-                else:
-                    placeholders[-1] += c
-            elif style == 'format':
-                state = OUTSIDE
-
-        prev_c = c
-
-    if style in ('numeric', 'qmark', 'format'):
-        def make_args(vals):
-            return vals
-    else:
-        def make_args(vals):
-            return tuple(vals[p] for p in placeholders)
-
-    return ''.join(output_query), make_args
-
-
-EPOCH = datetime.datetime(2000, 1, 1)
-EPOCH_TZ = EPOCH.replace(tzinfo=utc)
-EPOCH_SECONDS = timegm(EPOCH.timetuple())
-utcfromtimestamp = datetime.datetime.utcfromtimestamp
-
-INFINITY_MICROSECONDS = 2 ** 63 - 1
-MINUS_INFINITY_MICROSECONDS = -1 * INFINITY_MICROSECONDS - 1
-
-
-# data is 64-bit integer representing microseconds since 2000-01-01
-def timestamp_recv_integer(data, offset, length):
-    micros = q_unpack(data, offset)[0]
-    try:
-        return EPOCH + timedelta(microseconds=micros)
-    except OverflowError as e:
-        if micros == INFINITY_MICROSECONDS:
-            return datetime.datetime.max
-        elif micros == MINUS_INFINITY_MICROSECONDS:
-            return datetime.datetime.min
-        else:
-            raise e
-
-
-# data is double-precision float representing seconds since 2000-01-01
-def timestamp_recv_float(data, offset, length):
-    return utcfromtimestamp(EPOCH_SECONDS + d_unpack(data, offset)[0])
-
-
-# data is 64-bit integer representing microseconds since 2000-01-01
-def timestamp_send_integer(v):
-    if v == datetime.datetime.max:
-        micros = INFINITY_MICROSECONDS
-    elif v == datetime.datetime.min:
-        micros = MINUS_INFINITY_MICROSECONDS
-    else:
-        micros = int(
-            (timegm(v.timetuple()) - EPOCH_SECONDS) * 1e6) + v.microsecond
-    return q_pack(micros)
-
-
-# data is double-precision float representing seconds since 2000-01-01
-def timestamp_send_float(v):
-    return d_pack(timegm(v.timetuple()) + v.microsecond / 1e6 - EPOCH_SECONDS)
-
-
-def timestamptz_send_integer(v):
-    # timestamps should be sent as UTC.  If they have zone info,
-    # convert them.
-    return timestamp_send_integer(v.astimezone(utc).replace(tzinfo=None))
-
-
-def timestamptz_send_float(v):
-    # timestamps should be sent as UTC.  If they have zone info,
-    # convert them.
-    return timestamp_send_float(v.astimezone(utc).replace(tzinfo=None))
-
-DATETIME_MAX_TZ = datetime.datetime.max.replace(tzinfo=utc)
-DATETIME_MIN_TZ = datetime.datetime.min.replace(tzinfo=utc)
-
-
-# return a timezone-aware datetime instance if we're reading from a
-# "timestamp with timezone" type.  The timezone returned will always be
-# UTC, but providing that additional information can permit conversion
-# to local.
-def timestamptz_recv_integer(data, offset, length):
-    micros = q_unpack(data, offset)[0]
-    try:
-        return EPOCH_TZ + timedelta(microseconds=micros)
-    except OverflowError as e:
-        if micros == INFINITY_MICROSECONDS:
-            return DATETIME_MAX_TZ
-        elif micros == MINUS_INFINITY_MICROSECONDS:
-            return DATETIME_MIN_TZ
-        else:
-            raise e
-
-
-def timestamptz_recv_float(data, offset, length):
-    return timestamp_recv_float(data, offset, length).replace(tzinfo=utc)
-
-
-def interval_send_integer(v):
-    microseconds = v.microseconds
-    try:
-        microseconds += int(v.seconds * 1e6)
-    except AttributeError:
-        pass
-
-    try:
-        months = v.months
-    except AttributeError:
-        months = 0
-
-    return qii_pack(microseconds, v.days, months)
-
-
-def interval_send_float(v):
-    seconds = v.microseconds / 1000.0 / 1000.0
-    try:
-        seconds += v.seconds
-    except AttributeError:
-        pass
-
-    try:
-        months = v.months
-    except AttributeError:
-        months = 0
-
-    return dii_pack(seconds, v.days, months)
-
-
-def interval_recv_integer(data, offset, length):
-    microseconds, days, months = qii_unpack(data, offset)
-    if months == 0:
-        seconds, micros = divmod(microseconds, 1e6)
-        return datetime.timedelta(days, seconds, micros)
-    else:
-        return Interval(microseconds, days, months)
-
-
-def interval_recv_float(data, offset, length):
-    seconds, days, months = dii_unpack(data, offset)
-    if months == 0:
-        secs, microseconds = divmod(seconds, 1e6)
-        return datetime.timedelta(days, secs, microseconds)
-    else:
-        return Interval(int(seconds * 1000 * 1000), days, months)
-
-
-def int8_recv(data, offset, length):
-    return q_unpack(data, offset)[0]
-
-
-def int2_recv(data, offset, length):
-    return h_unpack(data, offset)[0]
-
-
-def int4_recv(data, offset, length):
-    return i_unpack(data, offset)[0]
-
-
-def float4_recv(data, offset, length):
-    return f_unpack(data, offset)[0]
-
-
-def float8_recv(data, offset, length):
-    return d_unpack(data, offset)[0]
-
-
-def bytea_send(v):
-    return v
-
-# bytea
-if PY2:
-    def bytea_recv(data, offset, length):
-        return Bytea(data[offset:offset + length])
-else:
-    def bytea_recv(data, offset, length):
-        return data[offset:offset + length]
-
-
-def uuid_send(v):
-    return v.bytes
-
-
-def uuid_recv(data, offset, length):
-    return UUID(bytes=data[offset:offset+length])
-
-
-TRUE = b("\x01")
-FALSE = b("\x00")
-
-
-def bool_send(v):
-    return TRUE if v else FALSE
-
-
-NULL = i_pack(-1)
-
-NULL_BYTE = b('\x00')
-
-
-def null_send(v):
-    return NULL
-
-
-def int_in(data, offset, length):
-    return int(data[offset: offset + length])
-
-
-class Cursor():
-    """A cursor object is returned by the :meth:`~Connection.cursor` method of
-    a connection. It has the following attributes and methods:
-
-    .. attribute:: arraysize
-
-        This read/write attribute specifies the number of rows to fetch at a
-        time with :meth:`fetchmany`.  It defaults to 1.
-
-    .. attribute:: connection
-
-        This read-only attribute contains a reference to the connection object
-        (an instance of :class:`Connection`) on which the cursor was
-        created.
-
-        This attribute is part of a DBAPI 2.0 extension.  Accessing this
-        attribute will generate the following warning: ``DB-API extension
-        cursor.connection used``.
-
-    .. attribute:: rowcount
-
-        This read-only attribute contains the number of rows that the last
-        ``execute()`` or ``executemany()`` method produced (for query
-        statements like ``SELECT``) or affected (for modification statements
-        like ``UPDATE``).
-
-        The value is -1 if:
-
-        - No ``execute()`` or ``executemany()`` method has been performed yet
-          on the cursor.
-        - There was no rowcount associated with the last ``execute()``.
-        - At least one of the statements executed as part of an
-          ``executemany()`` had no row count associated with it.
-        - Using a ``SELECT`` query statement on PostgreSQL server older than
-          version 9.
-        - Using a ``COPY`` query statement on PostgreSQL server version 8.1 or
-          older.
-
-        This attribute is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-
-    .. attribute:: description
-
-        This read-only attribute is a sequence of 7-item sequences.  Each value
-        contains information describing one result column.  The 7 items
-        returned for each column are (name, type_code, display_size,
-        internal_size, precision, scale, null_ok).  Only the first two values
-        are provided by the current implementation.
-
-        This attribute is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-    """
-
-    def __init__(self, connection):
-        self._c = connection
-        self.arraysize = 1
-        self.ps = None
-        self._row_count = -1
-        self._cached_rows = deque()
-        self.portal_name = None
-        self.portal_suspended = False
-
-    @property
-    def connection(self):
-        warn("DB-API extension cursor.connection used", stacklevel=3)
-        return self._c
-
-    @property
-    def rowcount(self):
-        return self._row_count
-
-    description = property(lambda self: self._getDescription())
-
-    def _getDescription(self):
-        if self.ps is None:
-            return None
-        row_desc = self.ps['row_desc']
-        if len(row_desc) == 0:
-            return None
-        columns = []
-        for col in row_desc:
-            columns.append(
-                (col["name"], col["type_oid"], None, None, None, None, None))
-        return columns
-
-    ##
-    # Executes a database operation.  Parameters may be provided as a sequence
-    # or mapping and will be bound to variables in the operation.
-    # <p>
-    # Stability: Part of the DBAPI 2.0 specification.
-    def execute(self, operation, args=None, stream=None):
-        """Executes a database operation.  Parameters may be provided as a
-        sequence, or as a mapping, depending upon the value of
-        :data:`pg8000.paramstyle`.
-
-        This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-
-        :param operation:
-            The SQL statement to execute.
-
-        :param args:
-            If :data:`paramstyle` is ``qmark``, ``numeric``, or ``format``,
-            this argument should be an array of parameters to bind into the
-            statement.  If :data:`paramstyle` is ``named``, the argument should
-            be a dict mapping of parameters.  If the :data:`paramstyle` is
-            ``pyformat``, the argument value may be either an array or a
-            mapping.
-
-        :param stream: This is a pg8000 extension for use with the PostgreSQL
-            `COPY
-            <http://www.postgresql.org/docs/current/static/sql-copy.html>`_
-            command. For a COPY FROM the parameter must be a readable file-like
-            object, and for COPY TO it must be writable.
-
-            .. versionadded:: 1.9.11
-        """
-        try:
-            with self._c._lock:
-                self.stream = stream
-
-                if not self._c.in_transaction and not self._c.autocommit:
-                    self._c.execute(self, "begin transaction", None)
-                self._c.execute(self, operation, args)
-        except AttributeError as e:
-            if self._c is None:
-                raise InterfaceError("Cursor closed")
-            elif self._c._sock is None:
-                raise InterfaceError("connection is closed")
-            else:
-                raise e
-
-    def executemany(self, operation, param_sets):
-        """Prepare a database operation, and then execute it against all
-        parameter sequences or mappings provided.
-
-        This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-
-        :param operation:
-            The SQL statement to execute
-        :param parameter_sets:
-            A sequence of parameters to execute the statement with. The values
-            in the sequence should be sequences or mappings of parameters, the
-            same as the args argument of the :meth:`execute` method.
-        """
-        rowcounts = []
-        for parameters in param_sets:
-            self.execute(operation, parameters)
-            rowcounts.append(self._row_count)
-
-        self._row_count = -1 if -1 in rowcounts else sum(rowcounts)
-
-    def fetchone(self):
-        """Fetch the next row of a query result set.
-
-        This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-
-        :returns:
-            A row as a sequence of field values, or ``None`` if no more rows
-            are available.
-        """
-        try:
-            return next(self)
-        except StopIteration:
-            return None
-        except TypeError:
-            raise ProgrammingError("attempting to use unexecuted cursor")
-        except AttributeError:
-            raise ProgrammingError("attempting to use unexecuted cursor")
-
-    def fetchmany(self, num=None):
-        """Fetches the next set of rows of a query result.
-
-        This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-
-        :param size:
-
-            The number of rows to fetch when called.  If not provided, the
-            :attr:`arraysize` attribute value is used instead.
-
-        :returns:
-
-            A sequence, each entry of which is a sequence of field values
-            making up a row.  If no more rows are available, an empty sequence
-            will be returned.
-        """
-        try:
-            return tuple(
-                islice(self, self.arraysize if num is None else num))
-        except TypeError:
-            raise ProgrammingError("attempting to use unexecuted cursor")
-
-    def fetchall(self):
-        """Fetches all remaining rows of a query result.
-
-        This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-
-        :returns:
-
-            A sequence, each entry of which is a sequence of field values
-            making up a row.
-        """
-        try:
-            return tuple(self)
-        except TypeError:
-            raise ProgrammingError("attempting to use unexecuted cursor")
-
-    def close(self):
-        """Closes the cursor.
-
-        This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        self._c = None
-
-    def __iter__(self):
-        """A cursor object is iterable to retrieve the rows from a query.
-
-        This is a DBAPI 2.0 extension.
-        """
-        return self
-
-    def setinputsizes(self, sizes):
-        """This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_, however, it is not
-        implemented by pg8000.
-        """
-        pass
-
-    def setoutputsize(self, size, column=None):
-        """This method is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_, however, it is not
-        implemented by pg8000.
-        """
-        pass
-
-    def __next__(self):
-        with self._c._lock:
-            try:
-                return self._cached_rows.popleft()
-            except IndexError:
-                if self.portal_suspended:
-                    self._c.send_EXECUTE(self)
-                    self._c._write(SYNC_MSG)
-                    self._c._flush()
-                    self._c.handle_messages(self)
-                    if not self.portal_suspended:
-                        self._c.close_portal(self)
-                try:
-                    return self._cached_rows.popleft()
-                except IndexError:
-                    if self.ps is None:
-                        raise ProgrammingError("A query hasn't been issued.")
-                    elif len(self.ps['row_desc']) == 0:
-                        raise ProgrammingError("no result set")
-                    else:
-                        raise StopIteration()
-
-if PY2:
-    Cursor.next = Cursor.__next__
 
 # Message codes
-NOTICE_RESPONSE = b("N")
-AUTHENTICATION_REQUEST = b("R")
-PARAMETER_STATUS = b("S")
-BACKEND_KEY_DATA = b("K")
-READY_FOR_QUERY = b("Z")
-ROW_DESCRIPTION = b("T")
-ERROR_RESPONSE = b("E")
-DATA_ROW = b("D")
-COMMAND_COMPLETE = b("C")
-PARSE_COMPLETE = b("1")
-BIND_COMPLETE = b("2")
-CLOSE_COMPLETE = b("3")
-PORTAL_SUSPENDED = b("s")
-NO_DATA = b("n")
-PARAMETER_DESCRIPTION = b("t")
-NOTIFICATION_RESPONSE = b("A")
-COPY_DONE = b("c")
-COPY_DATA = b("d")
-COPY_IN_RESPONSE = b("G")
-COPY_OUT_RESPONSE = b("H")
-EMPTY_QUERY_RESPONSE = b("I")
-
-BIND = b("B")
-PARSE = b("P")
-EXECUTE = b("E")
-FLUSH = b('H')
-SYNC = b('S')
-PASSWORD = b('p')
-DESCRIBE = b('D')
-TERMINATE = b('X')
-CLOSE = b('C')
-
-FLUSH_MSG = FLUSH + i_pack(4)
-SYNC_MSG = SYNC + i_pack(4)
-TERMINATE_MSG = TERMINATE + i_pack(4)
-COPY_DONE_MSG = COPY_DONE + i_pack(4)
+NOTICE_RESPONSE = b"N"
+AUTHENTICATION_REQUEST = b"R"
+PARAMETER_STATUS = b"S"
+BACKEND_KEY_DATA = b"K"
+READY_FOR_QUERY = b"Z"
+ROW_DESCRIPTION = b"T"
+ERROR_RESPONSE = b"E"
+DATA_ROW = b"D"
+COMMAND_COMPLETE = b"C"
+PARSE_COMPLETE = b"1"
+BIND_COMPLETE = b"2"
+CLOSE_COMPLETE = b"3"
+PORTAL_SUSPENDED = b"s"
+NO_DATA = b"n"
+PARAMETER_DESCRIPTION = b"t"
+NOTIFICATION_RESPONSE = b"A"
+COPY_DONE = b"c"
+COPY_DATA = b"d"
+COPY_IN_RESPONSE = b"G"
+COPY_OUT_RESPONSE = b"H"
+EMPTY_QUERY_RESPONSE = b"I"
+
+BIND = b"B"
+PARSE = b"P"
+QUERY = b"Q"
+EXECUTE = b"E"
+FLUSH = b"H"
+SYNC = b"S"
+PASSWORD = b"p"
+DESCRIBE = b"D"
+TERMINATE = b"X"
+CLOSE = b"C"
+
+
+def _create_message(code, data=b""):
+    return code + i_pack(len(data) + 4) + data
+
+
+FLUSH_MSG = _create_message(FLUSH)
+SYNC_MSG = _create_message(SYNC)
+TERMINATE_MSG = _create_message(TERMINATE)
+COPY_DONE_MSG = _create_message(COPY_DONE)
+EXECUTE_MSG = _create_message(EXECUTE, NULL_BYTE + i_pack(0))
 
 # DESCRIBE constants
-STATEMENT = b('S')
-PORTAL = b('P')
+STATEMENT = b"S"
+PORTAL = b"P"
 
 # ErrorResponse codes
-RESPONSE_SEVERITY = b("S")  # always present
-RESPONSE_CODE = b("C")  # always present
-RESPONSE_MSG = b("M")  # always present
-RESPONSE_DETAIL = b("D")
-RESPONSE_HINT = b("H")
-RESPONSE_POSITION = b("P")
-RESPONSE__POSITION = b("p")
-RESPONSE__QUERY = b("q")
-RESPONSE_WHERE = b("W")
-RESPONSE_FILE = b("F")
-RESPONSE_LINE = b("L")
-RESPONSE_ROUTINE = b("R")
-
-IDLE = b("I")
-IDLE_IN_TRANSACTION = b("T")
-IDLE_IN_FAILED_TRANSACTION = b("E")
-
-
-arr_trans = dict(zip(map(ord, u("[] 'u")), list(u('{}')) + [None] * 3))
-
-
-class MulticastDelegate(object):
-    def __init__(self):
-        self.delegates = []
-
-    def __iadd__(self, delegate):
-        self.add(delegate)
-        return self
+RESPONSE_SEVERITY = "S"  # always present
+RESPONSE_SEVERITY = "V"  # always present
+RESPONSE_CODE = "C"  # always present
+RESPONSE_MSG = "M"  # always present
+RESPONSE_DETAIL = "D"
+RESPONSE_HINT = "H"
+RESPONSE_POSITION = "P"
+RESPONSE__POSITION = "p"
+RESPONSE__QUERY = "q"
+RESPONSE_WHERE = "W"
+RESPONSE_FILE = "F"
+RESPONSE_LINE = "L"
+RESPONSE_ROUTINE = "R"
+
+IDLE = b"I"
+IDLE_IN_TRANSACTION = b"T"
+IDLE_IN_FAILED_TRANSACTION = b"E"
 
-    def add(self, delegate):
-        self.delegates.append(delegate)
 
-    def __isub__(self, delegate):
-        self.delegates.remove(delegate)
+class CoreConnection:
+    def __enter__(self):
         return self
 
-    def __call__(self, *args, **kwargs):
-        for d in self.delegates:
-            d(*args, **kwargs)
-
-
-class Connection(object):
-    """A connection object is returned by the :func:`pg8000.connect` function.
-    It represents a single physical connection to a PostgreSQL database.
-
-    .. attribute:: Connection.notifies
-
-        A list of server-side notifications received by this database
-        connection (via the LISTEN/NOTIFY PostgreSQL commands).  Each list
-        element is a two-element tuple containing the PostgreSQL backend PID
-        that issued the notify, and the notification name.
-
-        PostgreSQL will only send notifications to a client between
-        transactions.  The contents of this property are generally only
-        populated after a commit or rollback of the current transaction.
-
-        This list can be modified by a client application to clean out
-        notifications as they are handled.  However, inspecting or modifying
-        this collection should only be done while holding the
-        :attr:`notifies_lock` lock in order to guarantee thread-safety.
-
-        This attribute is not part of the DBAPI standard; it is a pg8000
-        extension.
-
-        .. versionadded:: 1.07
-
-    .. attribute:: Connection.notifies_lock
-
-        A :class:`threading.Lock` object that should be held to read or
-        modify the contents of the :attr:`notifies` list.
-
-        This attribute is not part of the DBAPI standard; it is a pg8000
-        extension.
-
-        .. versionadded:: 1.07
-
-    .. attribute:: Connection.autocommit
-
-        Following the DB-API specification, autocommit is off by default.
-        It can be turned on by setting this boolean pg8000-specific autocommit
-        property to True.
-
-        .. versionadded:: 1.9
-
-    .. exception:: Connection.Error
-                   Connection.Warning
-                   Connection.InterfaceError
-                   Connection.DatabaseError
-                   Connection.InternalError
-                   Connection.OperationalError
-                   Connection.ProgrammingError
-                   Connection.IntegrityError
-                   Connection.DataError
-                   Connection.NotSupportedError
-
-        All of the standard database exception types are accessible via
-        connection instances.
-
-        This is a DBAPI 2.0 extension.  Accessing any of these attributes will
-        generate the warning ``DB-API extension connection.DatabaseError
-        used``.
-    """
-
-    # DBAPI Extension: supply exceptions as attributes on the connection
-    Warning = property(lambda self: self._getError(Warning))
-    Error = property(lambda self: self._getError(Error))
-    InterfaceError = property(lambda self: self._getError(InterfaceError))
-    DatabaseError = property(lambda self: self._getError(DatabaseError))
-    OperationalError = property(lambda self: self._getError(OperationalError))
-    IntegrityError = property(lambda self: self._getError(IntegrityError))
-    InternalError = property(lambda self: self._getError(InternalError))
-    ProgrammingError = property(lambda self: self._getError(ProgrammingError))
-    NotSupportedError = property(
-        lambda self: self._getError(NotSupportedError))
-
-    # Determines the number of rows to read from the database server at once.
-    # Reading more rows increases performance at the cost of memory.  The
-    # default value is 100 rows.  The effect of this parameter is transparent.
-    # That is, the library reads more rows when the cache is empty
-    # automatically.
-    _row_cache_size = 100
-    _row_cache_size_bin = i_pack(_row_cache_size)
-
-    def _getError(self, error):
-        warn(
-            "DB-API extension connection.%s used" %
-            error.__name__, stacklevel=3)
-        return error
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
 
     def __init__(
-            self, user, host, unix_sock, port, database, password, ssl,
-            timeout):
+        self,
+        user,
+        host="localhost",
+        database=None,
+        port=5432,
+        password=None,
+        source_address=None,
+        unix_sock=None,
+        ssl_context=None,
+        timeout=None,
+        tcp_keepalive=True,
+        application_name=None,
+        replication=None,
+    ):
         self._client_encoding = "utf8"
         self._commands_with_count = (
-            b("INSERT"), b("DELETE"), b("UPDATE"), b("MOVE"),
-            b("FETCH"), b("COPY"), b("SELECT"))
-        self._lock = threading.Lock()
+            b"INSERT",
+            b"DELETE",
+            b"UPDATE",
+            b"MOVE",
+            b"FETCH",
+            b"COPY",
+            b"SELECT",
+        )
+        self.notifications = deque(maxlen=100)
+        self.notices = deque(maxlen=100)
+        self.parameter_statuses = deque(maxlen=100)
 
         if user is None:
-            raise InterfaceError(
-                "The 'user' connection parameter cannot be None")
+            raise InterfaceError("The 'user' connection parameter cannot be None")
 
-        if isinstance(user, text_type):
-            self.user = user.encode('utf8')
-        else:
-            self.user = user
+        init_params = {
+            "user": user,
+            "database": database,
+            "application_name": application_name,
+            "replication": replication,
+        }
 
-        if isinstance(password, text_type):
-            self.password = password.encode('utf8')
+        for k, v in tuple(init_params.items()):
+            if isinstance(v, str):
+                init_params[k] = v.encode("utf8")
+            elif v is None:
+                del init_params[k]
+            elif not isinstance(v, (bytes, bytearray)):
+                raise InterfaceError(f"The parameter {k} can't be of type {type(v)}.")
+
+        self.user = init_params["user"]
+
+        if isinstance(password, str):
+            self.password = password.encode("utf8")
         else:
             self.password = password
 
         self.autocommit = False
         self._xid = None
+        self._statement_nums = set()
 
-        self._caches = defaultdict(lambda: defaultdict(dict))
-        self.statement_number = 0
-        self.portal_number = 0
+        self._caches = {}
 
-        try:
-            if unix_sock is None and host is not None:
-                self._usock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            elif unix_sock is not None:
+        if unix_sock is None and host is not None:
+            try:
+                self._usock = socket.create_connection(
+                    (host, port), timeout, source_address
+                )
+            except socket.error as e:
+                raise InterfaceError(
+                    f"Can't create a connection to host {host} and port {port} "
+                    f"(timeout is {timeout} and source_address is {source_address})."
+                ) from e
+
+        elif unix_sock is not None:
+            try:
                 if not hasattr(socket, "AF_UNIX"):
                     raise InterfaceError(
-                        "attempt to connect to unix socket on unsupported "
-                        "platform")
+                        "attempt to connect to unix socket on unsupported platform"
+                    )
                 self._usock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            else:
-                raise ProgrammingError(
-                    "one of host or unix_sock must be provided")
-            if not PY2 and timeout is not None:
                 self._usock.settimeout(timeout)
-
-            if unix_sock is None and host is not None:
-                self._usock.connect((host, port))
-            elif unix_sock is not None:
                 self._usock.connect(unix_sock)
+            except socket.error as e:
+                if self._usock is not None:
+                    self._usock.close()
+                raise InterfaceError("communication error") from e
 
-            if ssl:
-                with self._lock:
-                    try:
-                        import ssl as sslmodule
-                        # Int32(8) - Message length, including self.
-                        # Int32(80877103) - The SSL request code.
-                        self._usock.sendall(ii_pack(8, 80877103))
-                        resp = self._usock.recv(1)
-                        if resp == b('S'):
-                            self._usock = sslmodule.wrap_socket(self._usock)
-                        else:
-                            raise InterfaceError("Server refuses SSL")
-                    except ImportError:
-                        raise InterfaceError(
-                            "SSL required but ssl module not available in "
-                            "this python installation")
+        else:
+            raise InterfaceError("one of host or unix_sock must be provided")
 
-            self._sock = self._usock.makefile(mode="rwb")
-        except socket.error as e:
-            self._usock.close()
-            raise InterfaceError("communication error", e)
-        self._flush = self._sock.flush
-        self._read = self._sock.read
-        self._write = self._sock.write
-        self._backend_key_data = None
+        if tcp_keepalive:
+            self._usock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
 
-        ##
-        # An event handler that is fired when the database server issues a
-        # notice.
-        # The value of this property is a MulticastDelegate. A callback
-        # can be added by using connection.NotificationReceived += SomeMethod.
-        # The method will be called with a single argument, an object that has
-        # properties: severity, code, msg, and possibly others (detail, hint,
-        # position, where, file, line, and routine). Callbacks can be removed
-        # with the -= operator.
-        # <p>
-        # Stability: Added in v1.03, stability guaranteed for v1.xx.
-        self.NoticeReceived = MulticastDelegate()
-
-        ##
-        # An event handler that is fired when a runtime configuration option is
-        # changed on the server.  The value of this property is a
-        # MulticastDelegate.  A callback can be added by using
-        # connection.NotificationReceived += SomeMethod. Callbacks can be
-        # removed with the -= operator. The method will be called with a single
-        # argument, an object that has properties "key" and "value".
-        # <p>
-        # Stability: Added in v1.03, stability guaranteed for v1.xx.
-        self.ParameterStatusReceived = MulticastDelegate()
-
-        ##
-        # An event handler that is fired when NOTIFY occurs for a notification
-        # that has been LISTEN'd for.  The value of this property is a
-        # MulticastDelegate.  A callback can be added by using
-        # connection.NotificationReceived += SomeMethod. The method will be
-        # called with a single argument, an object that has properties:
-        # backend_pid, condition, and additional_info. Callbacks can be
-        # removed with the -= operator.
-        # <p>
-        # Stability: Added in v1.03, stability guaranteed for v1.xx.
-        self.NotificationReceived = MulticastDelegate()
-
-        self.ParameterStatusReceived += self.handle_PARAMETER_STATUS
-
-        def text_out(v):
-            return v.encode(self._client_encoding)
-
-        def time_out(v):
-            return v.isoformat().encode(self._client_encoding)
-
-        def date_out(v):
-            if v == datetime.date.max:
-                return 'infinity'.encode(self._client_encoding)
-            elif v == datetime.date.min:
-                return '-infinity'.encode(self._client_encoding)
-            else:
-                return v.isoformat().encode(self._client_encoding)
+        self.channel_binding = None
+        if ssl_context is not None:
+            try:
+                import ssl
 
-        def unknown_out(v):
-            return str(v).encode(self._client_encoding)
+                if ssl_context is True:
+                    ssl_context = ssl.create_default_context()
 
-        trans_tab = dict(zip(map(ord, u('{}')), u('[]')))
-        glbls = {'Decimal': Decimal}
+                request_ssl = getattr(ssl_context, "request_ssl", True)
 
-        def array_in(data, idx, length):
-            arr = []
-            prev_c = None
-            for c in data[idx:idx+length].decode(
-                    self._client_encoding).translate(
-                    trans_tab).replace(u('NULL'), u('None')):
-                if c not in ('[', ']', ',', 'N') and prev_c in ('[', ','):
-                    arr.extend("Decimal('")
-                elif c in (']', ',') and prev_c not in ('[', ']', ',', 'e'):
-                    arr.extend("')")
-
-                arr.append(c)
-                prev_c = c
-            return eval(''.join(arr), glbls)
-
-        def array_recv(data, idx, length):
-            final_idx = idx + length
-            dim, hasnull, typeoid = iii_unpack(data, idx)
-            idx += 12
-
-            # get type conversion method for typeoid
-            conversion = self.pg_types[typeoid][1]
-
-            # Read dimension info
-            dim_lengths = []
-            for i in range(dim):
-                dim_lengths.append(ii_unpack(data, idx)[0])
-                idx += 8
-
-            # Read all array values
-            values = []
-            while idx < final_idx:
-                element_len, = i_unpack(data, idx)
-                idx += 4
-                if element_len == -1:
-                    values.append(None)
-                else:
-                    values.append(conversion(data, idx, element_len))
-                    idx += element_len
-
-            # at this point, {{1,2,3},{4,5,6}}::int[][] looks like
-            # [1,2,3,4,5,6]. go through the dimensions and fix up the array
-            # contents to match expected dimensions
-            for length in reversed(dim_lengths[1:]):
-                values = list(map(list, zip(*[iter(values)] * length)))
-            return values
-
-        def vector_in(data, idx, length):
-            return eval('[' + data[idx:idx+length].decode(
-                self._client_encoding).replace(' ', ',') + ']')
-
-        if PY2:
-            def text_recv(data, offset, length):
-                return unicode(  # noqa
-                    data[offset: offset + length], self._client_encoding)
-
-            def bool_recv(d, o, l):
-                return d[o] == "\x01"
-
-            def json_in(data, offset, length):
-                return loads(unicode(  # noqa
-                    data[offset: offset + length], self._client_encoding))
+                if request_ssl:
+                    # Int32(8) - Message length, including self.
+                    # Int32(80877103) - The SSL request code.
+                    self._usock.sendall(ii_pack(8, 80877103))
+                    resp = self._usock.recv(1)
+                    if resp != b"S":
+                        raise InterfaceError("Server refuses SSL")
 
-        else:
-            def text_recv(data, offset, length):
-                return str(
-                    data[offset: offset + length], self._client_encoding)
-
-            def bool_recv(data, offset, length):
-                return data[offset] == 1
-
-            def json_in(data, offset, length):
-                return loads(
-                    str(data[offset: offset + length], self._client_encoding))
-
-        def time_in(data, offset, length):
-            hour = int(data[offset:offset + 2])
-            minute = int(data[offset + 3:offset + 5])
-            sec = Decimal(
-                data[offset + 6:offset + length].decode(self._client_encoding))
-            return datetime.time(
-                hour, minute, int(sec), int((sec - int(sec)) * 1000000))
-
-        def date_in(data, offset, length):
-            year_str = data[offset:offset + 4].decode(self._client_encoding)
-            if year_str == 'infi':
-                return datetime.date.max
-            elif year_str == '-inf':
-                return datetime.date.min
-            else:
-                return datetime.date(
-                    int(year_str), int(data[offset + 5:offset + 7]),
-                    int(data[offset + 8:offset + 10]))
-
-        def numeric_in(data, offset, length):
-            return Decimal(
-                data[offset: offset + length].decode(self._client_encoding))
-
-        def numeric_out(d):
-            return str(d).encode(self._client_encoding)
-
-        self.pg_types = defaultdict(
-            lambda: (FC_TEXT, text_recv), {
-                16: (FC_BINARY, bool_recv),  # boolean
-                17: (FC_BINARY, bytea_recv),  # bytea
-                19: (FC_BINARY, text_recv),  # name type
-                20: (FC_BINARY, int8_recv),  # int8
-                21: (FC_BINARY, int2_recv),  # int2
-                22: (FC_TEXT, vector_in),  # int2vector
-                23: (FC_BINARY, int4_recv),  # int4
-                25: (FC_BINARY, text_recv),  # TEXT type
-                26: (FC_TEXT, int_in),  # oid
-                28: (FC_TEXT, int_in),  # xid
-                114: (FC_TEXT, json_in),  # json
-                700: (FC_BINARY, float4_recv),  # float4
-                701: (FC_BINARY, float8_recv),  # float8
-                705: (FC_BINARY, text_recv),  # unknown
-                829: (FC_TEXT, text_recv),  # MACADDR type
-                1000: (FC_BINARY, array_recv),  # BOOL[]
-                1003: (FC_BINARY, array_recv),  # NAME[]
-                1005: (FC_BINARY, array_recv),  # INT2[]
-                1007: (FC_BINARY, array_recv),  # INT4[]
-                1009: (FC_BINARY, array_recv),  # TEXT[]
-                1014: (FC_BINARY, array_recv),  # CHAR[]
-                1015: (FC_BINARY, array_recv),  # VARCHAR[]
-                1016: (FC_BINARY, array_recv),  # INT8[]
-                1021: (FC_BINARY, array_recv),  # FLOAT4[]
-                1022: (FC_BINARY, array_recv),  # FLOAT8[]
-                1042: (FC_BINARY, text_recv),  # CHAR type
-                1043: (FC_BINARY, text_recv),  # VARCHAR type
-                1082: (FC_TEXT, date_in),  # date
-                1083: (FC_TEXT, time_in),
-                1114: (FC_BINARY, timestamp_recv_float),  # timestamp w/ tz
-                1184: (FC_BINARY, timestamptz_recv_float),
-                1186: (FC_BINARY, interval_recv_integer),
-                1231: (FC_TEXT, array_in),  # NUMERIC[]
-                1263: (FC_BINARY, array_recv),  # cstring[]
-                1700: (FC_TEXT, numeric_in),  # NUMERIC
-                2275: (FC_BINARY, text_recv),  # cstring
-                2950: (FC_BINARY, uuid_recv),  # uuid
-                3802: (FC_TEXT, json_in),  # jsonb
-            })
-
-        self.py_types = {
-            type(None): (-1, FC_BINARY, null_send),  # null
-            bool: (16, FC_BINARY, bool_send),
-            int: (705, FC_TEXT, unknown_out),
-            float: (701, FC_BINARY, d_pack),  # float8
-            datetime.date: (1082, FC_TEXT, date_out),  # date
-            datetime.time: (1083, FC_TEXT, time_out),  # time
-            1114: (1114, FC_BINARY, timestamp_send_integer),  # timestamp
-            # timestamp w/ tz
-            1184: (1184, FC_BINARY, timestamptz_send_integer),
-            datetime.timedelta: (1186, FC_BINARY, interval_send_integer),
-            Interval: (1186, FC_BINARY, interval_send_integer),
-            Decimal: (1700, FC_TEXT, numeric_out),  # Decimal
-            UUID: (2950, FC_BINARY, uuid_send),  # uuid
-        }
+                self._usock = ssl_context.wrap_socket(self._usock, server_hostname=host)
 
-        self.inspect_funcs = {
-            datetime.datetime: self.inspect_datetime,
-            list: self.array_inspect,
-            tuple: self.array_inspect,
-        }
+                if request_ssl:
+                    self.channel_binding = scramp.make_channel_binding(
+                        "tls-server-end-point", self._usock
+                    )
 
-        if PY2:
-            self.py_types[Bytea] = (17, FC_BINARY, bytea_send)  # bytea
-            self.py_types[text_type] = (705, FC_TEXT, text_out)  # unknown
-            self.py_types[str] = (705, FC_TEXT, bytea_send)  # unknown
+            except ImportError:
+                raise InterfaceError(
+                    "SSL required but ssl module not available in this python "
+                    "installation."
+                )
 
-            self.py_types[long] = (705, FC_TEXT, unknown_out)  # noqa
-        else:
-            self.py_types[bytes] = (17, FC_BINARY, bytea_send)  # bytea
-            self.py_types[str] = (705, FC_TEXT, text_out)  # unknown
+        self._sock = self._usock.makefile(mode="rwb")
 
-        try:
-            from ipaddress import (
-                ip_address, IPv4Address, IPv6Address, ip_network, IPv4Network,
-                IPv6Network)
-
-            def inet_out(v):
-                return str(v).encode(self._client_encoding)
-
-            def inet_in(data, offset, length):
-                inet_str = data[offset: offset + length].decode(
-                    self._client_encoding)
-                if '/' in inet_str:
-                    return ip_network(inet_str, False)
-                else:
-                    return ip_address(inet_str)
-
-            self.py_types[IPv4Address] = (869, FC_TEXT, inet_out)  # inet
-            self.py_types[IPv6Address] = (869, FC_TEXT, inet_out)  # inet
-            self.py_types[IPv4Network] = (869, FC_TEXT, inet_out)  # inet
-            self.py_types[IPv6Network] = (869, FC_TEXT, inet_out)  # inet
-            self.pg_types[869] = (FC_TEXT, inet_in)  # inet
-        except ImportError:
-            pass
+        def sock_flush():
+            try:
+                self._sock.flush()
+            except OSError as e:
+                raise InterfaceError("network error on flush") from e
+
+        self._flush = sock_flush
+
+        def sock_read(b):
+            try:
+                return self._sock.read(b)
+            except OSError as e:
+                raise InterfaceError("network error on read") from e
+
+        self._read = sock_read
+
+        def sock_write(d):
+            try:
+                self._sock.write(d)
+            except OSError as e:
+                raise InterfaceError("network error on write") from e
+
+        self._write = sock_write
+        self._backend_key_data = None
+
+        self.pg_types = defaultdict(lambda: string_in, PG_TYPES)
+        self.py_types = dict(PY_TYPES)
 
         self.message_types = {
             NOTICE_RESPONSE: self.handle_NOTICE_RESPONSE,
@@ -1571,7 +316,8 @@ class Connection(object):
             COPY_DONE: self.handle_COPY_DONE,
             COPY_DATA: self.handle_COPY_DATA,
             COPY_IN_RESPONSE: self.handle_COPY_IN_RESPONSE,
-            COPY_OUT_RESPONSE: self.handle_COPY_OUT_RESPONSE}
+            COPY_OUT_RESPONSE: self.handle_COPY_OUT_RESPONSE,
+        }
 
         # Int32 - Message length, including self.
         # Int32(196608) - Protocol version number.  Version 3.0.
@@ -1579,176 +325,157 @@ class Connection(object):
         #   String - A parameter name (user, database, or options)
         #   String - Parameter value
         protocol = 196608
-        val = bytearray(
-            i_pack(protocol) + b("user\x00") + self.user + NULL_BYTE)
-        if database is not None:
-            if isinstance(database, text_type):
-                database = database.encode('utf8')
-            val.extend(b("database\x00") + database + NULL_BYTE)
+        val = bytearray(i_pack(protocol))
+
+        for k, v in init_params.items():
+            val.extend(k.encode("ascii") + NULL_BYTE + v + NULL_BYTE)
         val.append(0)
         self._write(i_pack(len(val) + 4))
         self._write(val)
         self._flush()
 
-        self._cursor = self.cursor()
-        with self._lock:
-            try:
-                code = self.error = None
-                while code not in (READY_FOR_QUERY, ERROR_RESPONSE):
-                    code, data_len = ci_unpack(self._read(5))
-                    self.message_types[code](self._read(data_len - 4), None)
-                if self.error is not None:
-                    raise self.error
-            except Exception as e:
-                try:
-                    self._close()
-                except Exception:
-                    pass
-                raise e
+        code = self.error = None
+        while code not in (READY_FOR_QUERY, ERROR_RESPONSE):
+            code, data_len = ci_unpack(self._read(5))
+            self.message_types[code](self._read(data_len - 4), None)
+        if self.error is not None:
+            raise self.error
 
         self.in_transaction = False
-        self.notifies = []
-        self.notifies_lock = threading.Lock()
 
-    def handle_ERROR_RESPONSE(self, data, ps):
-        responses = tuple(
-            (s[0:1], s[1:].decode(self._client_encoding)) for s in
-            data.split(NULL_BYTE))
-        msg_dict = dict(responses)
-        if msg_dict[RESPONSE_CODE] == "28000":
-            self.error = InterfaceError("md5 password authentication failed")
-        else:
-            self.error = ProgrammingError(*tuple(v for k, v in responses))
+    def register_out_adapter(self, typ, out_func):
+        self.py_types[typ] = out_func
+
+    def register_in_adapter(self, oid, in_func):
+        self.pg_types[oid] = in_func
+
+    def handle_ERROR_RESPONSE(self, data, context):
+        msg = dict(
+            (
+                s[:1].decode("ascii"),
+                s[1:].decode(self._client_encoding, errors="replace"),
+            )
+            for s in data.split(NULL_BYTE)
+            if s != b""
+        )
 
-    def handle_EMPTY_QUERY_RESPONSE(self, data, ps):
-        self.error = ProgrammingError("query was empty")
+        self.error = DatabaseError(msg)
 
-    def handle_CLOSE_COMPLETE(self, data, ps):
+    def handle_EMPTY_QUERY_RESPONSE(self, data, context):
+        self.error = DatabaseError("query was empty")
+
+    def handle_CLOSE_COMPLETE(self, data, context):
         pass
 
-    def handle_PARSE_COMPLETE(self, data, ps):
+    def handle_PARSE_COMPLETE(self, data, context):
         # Byte1('1') - Identifier.
         # Int32(4) - Message length, including self.
         pass
 
-    def handle_BIND_COMPLETE(self, data, ps):
+    def handle_BIND_COMPLETE(self, data, context):
         pass
 
-    def handle_PORTAL_SUSPENDED(self, data, cursor):
-        cursor.portal_suspended = True
+    def handle_PORTAL_SUSPENDED(self, data, context):
+        pass
 
-    def handle_PARAMETER_DESCRIPTION(self, data, ps):
-        # Well, we don't really care -- we're going to send whatever we
-        # want and let the database deal with it.  But thanks anyways!
+    def handle_PARAMETER_DESCRIPTION(self, data, context):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
 
         # count = h_unpack(data)[0]
-        # type_oids = unpack_from("!" + "i" * count, data, 2)
-        pass
+        # context.parameter_oids = unpack_from("!" + "i" * count, data, 2)
 
-    def handle_COPY_DONE(self, data, ps):
-        self._copy_done = True
+    def handle_COPY_DONE(self, data, context):
+        pass
 
-    def handle_COPY_OUT_RESPONSE(self, data, ps):
-        # Int8(1) - 0 textual, 1 binary
-        # Int16(2) - Number of columns
-        # Int16(N) - Format codes for each column (0 text, 1 binary)
+    def handle_COPY_OUT_RESPONSE(self, data, context):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
 
         is_binary, num_cols = bh_unpack(data)
         # column_formats = unpack_from('!' + 'h' * num_cols, data, 3)
-        if ps.stream is None:
+
+        if context.stream is None:
             raise InterfaceError(
-                "An output stream is required for the COPY OUT response.")
+                "An output stream is required for the COPY OUT response."
+            )
+
+        elif isinstance(context.stream, TextIOBase):
+            if is_binary:
+                raise InterfaceError(
+                    "The COPY OUT stream is binary, but the stream parameter is text."
+                )
+            else:
+                decode = codecs.getdecoder(self._client_encoding)
+
+                def w(data):
+                    context.stream.write(decode(data)[0])
+
+                context.stream_write = w
+
+        else:
+            context.stream_write = context.stream.write
 
-    def handle_COPY_DATA(self, data, ps):
-        ps.stream.write(data)
+    def handle_COPY_DATA(self, data, context):
+        context.stream_write(data)
 
-    def handle_COPY_IN_RESPONSE(self, data, ps):
-        # Int16(2) - Number of columns
-        # Int16(N) - Format codes for each column (0 text, 1 binary)
+    def handle_COPY_IN_RESPONSE(self, data, context):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
         is_binary, num_cols = bh_unpack(data)
         # column_formats = unpack_from('!' + 'h' * num_cols, data, 3)
-        assert self._lock.locked()
-        if ps.stream is None:
+
+        if context.stream is None:
             raise InterfaceError(
-                "An input stream is required for the COPY IN response.")
+                "An input stream is required for the COPY IN response."
+            )
+
+        elif isinstance(context.stream, TextIOBase):
+            if is_binary:
+                raise InterfaceError(
+                    "The COPY IN stream is binary, but the stream parameter is text."
+                )
 
-        if PY2:
-            while True:
-                data = ps.stream.read(8192)
-                if not data:
-                    break
-                self._write(COPY_DATA + i_pack(len(data) + 4))
-                self._write(data)
-                self._flush()
+            else:
+
+                def ri(bffr):
+                    bffr.clear()
+                    bffr.extend(context.stream.read(4096).encode(self._client_encoding))
+                    return len(bffr)
+
+                readinto = ri
         else:
-            bffr = bytearray(8192)
-            while True:
-                bytes_read = ps.stream.readinto(bffr)
-                if bytes_read == 0:
-                    break
-                self._write(COPY_DATA + i_pack(bytes_read + 4))
-                self._write(bffr[:bytes_read])
-                self._flush()
+            readinto = context.stream.readinto
+
+        bffr = bytearray(8192)
+        while True:
+            bytes_read = readinto(bffr)
+            if bytes_read == 0:
+                break
+            self._write(COPY_DATA)
+            self._write(i_pack(bytes_read + 4))
+            self._write(bffr[:bytes_read])
+            self._flush()
 
         # Send CopyDone
-        # Byte1('c') - Identifier.
-        # Int32(4) - Message length, including self.
         self._write(COPY_DONE_MSG)
         self._write(SYNC_MSG)
         self._flush()
 
-    def handle_NOTIFICATION_RESPONSE(self, data, ps):
-        self.NotificationReceived(data)
-        ##
-        # A message sent if this connection receives a NOTIFY that it was
-        # LISTENing for.
-        # <p>
-        # Stability: Added in pg8000 v1.03.  When limited to accessing
-        # properties from a notification event dispatch, stability is
-        # guaranteed for v1.xx.
+    def handle_NOTIFICATION_RESPONSE(self, data, context):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
         backend_pid = i_unpack(data)[0]
         idx = 4
-        null = data.find(NULL_BYTE, idx) - idx
-        condition = data[idx:idx + null].decode("ascii")
-        idx += null + 1
-        null = data.find(NULL_BYTE, idx) - idx
-        # additional_info = data[idx:idx + null]
-
-        # psycopg2 compatible notification interface
-        with self.notifies_lock:
-            self.notifies.append((backend_pid, condition))
-
-    def cursor(self):
-        """Creates a :class:`Cursor` object bound to this
-        connection.
+        null_idx = data.find(NULL_BYTE, idx)
+        channel = data[idx:null_idx].decode("ascii")
+        payload = data[null_idx + 1 : -1].decode("ascii")
 
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        return Cursor(self)
-
-    def commit(self):
-        """Commits the current database transaction.
-
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        with self._lock:
-            self.execute(self._cursor, "commit", None)
+        self.notifications.append((backend_pid, channel, payload))
 
-    def rollback(self):
-        """Rolls back the current database transaction.
+    def close(self):
+        """Closes the database connection.
 
         This function is part of the `DBAPI 2.0 specification
         <http://www.python.org/dev/peps/pep-0249/>`_.
         """
-        with self._lock:
-            self.execute(self._cursor, "rollback", None)
-
-    def _close(self):
         try:
-            # Byte1('X') - Identifies the message as a terminate message.
-            # Int32(4) - Message length, including self.
             self._write(TERMINATE_MSG)
             self._flush()
             self._sock.close()
@@ -1756,275 +483,214 @@ class Connection(object):
             raise InterfaceError("connection is closed")
         except ValueError:
             raise InterfaceError("connection is closed")
-        except socket.error as e:
-            raise OperationalError(str(e))
+        except socket.error:
+            pass
         finally:
             self._usock.close()
             self._sock = None
 
-    def close(self):
-        """Closes the database connection.
+    def handle_AUTHENTICATION_REQUEST(self, data, context):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
 
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        with self._lock:
-            self._close()
-
-    def handle_AUTHENTICATION_REQUEST(self, data, cursor):
-        assert self._lock.locked()
-        # Int32 -   An authentication code that represents different
-        #           authentication messages:
-        #               0 = AuthenticationOk
-        #               5 = MD5 pwd
-        #               2 = Kerberos v5 (not supported by pg8000)
-        #               3 = Cleartext pwd
-        #               4 = crypt() pwd (not supported by pg8000)
-        #               6 = SCM credential (not supported by pg8000)
-        #               7 = GSSAPI (not supported by pg8000)
-        #               8 = GSSAPI data (not supported by pg8000)
-        #               9 = SSPI (not supported by pg8000)
-        # Some authentication messages have additional data following the
-        # authentication code.  That data is documented in the appropriate
-        # class.
         auth_code = i_unpack(data)[0]
         if auth_code == 0:
             pass
         elif auth_code == 3:
             if self.password is None:
                 raise InterfaceError(
-                    "server requesting password authentication, but no "
-                    "password was provided")
+                    "server requesting password authentication, but no password was "
+                    "provided"
+                )
             self._send_message(PASSWORD, self.password + NULL_BYTE)
             self._flush()
+
         elif auth_code == 5:
-            ##
-            # A message representing the backend requesting an MD5 hashed
-            # password response.  The response will be sent as
-            # md5(md5(pwd + login) + salt).
-
-            # Additional message data:
-            #  Byte4 - Hash salt.
-            salt = b("").join(cccc_unpack(data, 4))
+            salt = b"".join(cccc_unpack(data, 4))
             if self.password is None:
                 raise InterfaceError(
-                    "server requesting MD5 password authentication, but no "
-                    "password was provided")
-            pwd = b("md5") + md5(
-                md5(self.password + self.user).hexdigest().encode("ascii") +
-                salt).hexdigest().encode("ascii")
-            # Byte1('p') - Identifies the message as a password message.
-            # Int32 - Message length including self.
-            # String - The password.  Password may be encrypted.
+                    "server requesting MD5 password authentication, but no password "
+                    "was provided"
+                )
+            pwd = b"md5" + md5(
+                md5(self.password + self.user).hexdigest().encode("ascii") + salt
+            ).hexdigest().encode("ascii")
             self._send_message(PASSWORD, pwd + NULL_BYTE)
             self._flush()
 
+        elif auth_code == 10:
+            # AuthenticationSASL
+            mechanisms = [m.decode("ascii") for m in data[4:-2].split(NULL_BYTE)]
+
+            self.auth = scramp.ScramClient(
+                mechanisms,
+                self.user.decode("utf8"),
+                self.password.decode("utf8"),
+                channel_binding=self.channel_binding,
+            )
+
+            init = self.auth.get_client_first().encode("utf8")
+            mech = self.auth.mechanism_name.encode("ascii") + NULL_BYTE
+
+            # SASLInitialResponse
+            self._send_message(PASSWORD, mech + i_pack(len(init)) + init)
+            self._flush()
+
+        elif auth_code == 11:
+            # AuthenticationSASLContinue
+            self.auth.set_server_first(data[4:].decode("utf8"))
+
+            # SASLResponse
+            msg = self.auth.get_client_final().encode("utf8")
+            self._send_message(PASSWORD, msg)
+            self._flush()
+
+        elif auth_code == 12:
+            # AuthenticationSASLFinal
+            self.auth.set_server_final(data[4:].decode("utf8"))
+
         elif auth_code in (2, 4, 6, 7, 8, 9):
             raise InterfaceError(
-                "Authentication method " + str(auth_code) +
-                " not supported by pg8000.")
+                f"Authentication method {auth_code} not supported by pg8000."
+            )
         else:
             raise InterfaceError(
-                "Authentication method " + str(auth_code) +
-                " not recognized by pg8000.")
+                f"Authentication method {auth_code} not recognized by pg8000."
+            )
 
-    def handle_READY_FOR_QUERY(self, data, ps):
-        # Byte1 -   Status indicator.
+    def handle_READY_FOR_QUERY(self, data, context):
         self.in_transaction = data != IDLE
 
-    def handle_BACKEND_KEY_DATA(self, data, ps):
+    def handle_BACKEND_KEY_DATA(self, data, context):
         self._backend_key_data = data
 
-    def inspect_datetime(self, value):
-        if value.tzinfo is None:
-            return self.py_types[1114]  # timestamp
-        else:
-            return self.py_types[1184]  # send as timestamptz
-
-    def make_params(self, values):
-        params = []
-        for value in values:
-            typ = type(value)
-            try:
-                params.append(self.py_types[typ])
-            except KeyError:
-                try:
-                    params.append(self.inspect_funcs[typ](value))
-                except KeyError as e:
-                    raise NotSupportedError(
-                        "type " + str(e) + "not mapped to pg type")
-        return tuple(params)
-
-    def handle_ROW_DESCRIPTION(self, data, cursor):
+    def handle_ROW_DESCRIPTION(self, data, context):
         count = h_unpack(data)[0]
         idx = 2
+        columns = []
+        input_funcs = []
         for i in range(count):
-            name = data[idx:data.find(NULL_BYTE, idx)]
+            name = data[idx : data.find(NULL_BYTE, idx)]
             idx += len(name) + 1
             field = dict(
-                zip((
-                    "table_oid", "column_attrnum", "type_oid", "type_size",
-                    "type_modifier", "format"), ihihih_unpack(data, idx)))
-            field['name'] = name
+                zip(
+                    (
+                        "table_oid",
+                        "column_attrnum",
+                        "type_oid",
+                        "type_size",
+                        "type_modifier",
+                        "format",
+                    ),
+                    ihihih_unpack(data, idx),
+                )
+            )
+            field["name"] = name.decode(self._client_encoding)
             idx += 18
-            cursor.ps['row_desc'].append(field)
-            field['pg8000_fc'], field['func'] = \
-                self.pg_types[field['type_oid']]
-
-    def execute(self, cursor, operation, vals):
-        if vals is None:
-            vals = ()
-        paramstyle = pg8000.paramstyle
-        cache = self._caches[paramstyle]
+            columns.append(field)
+            input_funcs.append(self.pg_types[field["type_oid"]])
 
-        try:
-            statement, make_args = cache['statement'][operation]
-        except KeyError:
-            statement, make_args = convert_paramstyle(paramstyle, operation)
-            cache['statement'][operation] = statement, make_args
-
-        args = make_args(vals)
-        params = self.make_params(args)
-        key = operation, params
+        context.columns = columns
+        context.input_funcs = input_funcs
+        if context.rows is None:
+            context.rows = []
 
-        try:
-            ps = cache['ps'][key]
-            cursor.ps = ps
-        except KeyError:
-            statement_name = "pg8000_statement_" + str(self.statement_number)
-            self.statement_number += 1
-            statement_name_bin = statement_name.encode('ascii') + NULL_BYTE
-            ps = {
-                'row_desc': [],
-                'param_funcs': tuple(x[2] for x in params),
-            }
-            cursor.ps = ps
-
-            param_fcs = tuple(x[1] for x in params)
-
-            # Byte1('P') - Identifies the message as a Parse command.
-            # Int32 -   Message length, including self.
-            # String -  Prepared statement name. An empty string selects the
-            #           unnamed prepared statement.
-            # String -  The query string.
-            # Int16 -   Number of parameter data types specified (can be zero).
-            # For each parameter:
-            #   Int32 - The OID of the parameter data type.
-            val = bytearray(statement_name_bin)
-            val.extend(statement.encode(self._client_encoding) + NULL_BYTE)
-            val.extend(h_pack(len(params)))
-            for oid, fc, send_func in params:
-                # Parse message doesn't seem to handle the -1 type_oid for NULL
-                # values that other messages handle.  So we'll provide type_oid
-                # 705, the PG "unknown" type.
-                val.extend(i_pack(705 if oid == -1 else oid))
-
-            # Byte1('D') - Identifies the message as a describe command.
-            # Int32 - Message length, including self.
-            # Byte1 - 'S' for prepared statement, 'P' for portal.
-            # String - The name of the item to describe.
-            self._send_message(PARSE, val)
-            self._send_message(DESCRIBE, STATEMENT + statement_name_bin)
-            self._write(SYNC_MSG)
+    def send_PARSE(self, statement_name_bin, statement, oids=()):
 
-            try:
-                self._flush()
-            except AttributeError as e:
-                if self._sock is None:
-                    raise InterfaceError("connection is closed")
-                else:
-                    raise e
-            except socket.error as e:
-                raise OperationalError(str(e))
+        val = bytearray(statement_name_bin)
+        val.extend(statement.encode(self._client_encoding) + NULL_BYTE)
+        val.extend(h_pack(len(oids)))
+        for oid in oids:
+            val.extend(i_pack(0 if oid == -1 else oid))
 
-            self.handle_messages(cursor)
+        self._send_message(PARSE, val)
+        self._write(FLUSH_MSG)
 
-            # We've got row_desc that allows us to identify what we're
-            # going to get back from this statement.
-            output_fc = tuple(
-                self.pg_types[f['type_oid']][0] for f in ps['row_desc'])
-
-            ps['input_funcs'] = tuple(f['func'] for f in ps['row_desc'])
-            # Byte1('B') - Identifies the Bind command.
-            # Int32 - Message length, including self.
-            # String - Name of the destination portal.
-            # String - Name of the source prepared statement.
-            # Int16 - Number of parameter format codes.
-            # For each parameter format code:
-            #   Int16 - The parameter format code.
-            # Int16 - Number of parameter values.
-            # For each parameter value:
-            #   Int32 - The length of the parameter value, in bytes, not
-            #           including this length.  -1 indicates a NULL parameter
-            #           value, in which no value bytes follow.
-            #   Byte[n] - Value of the parameter.
-            # Int16 - The number of result-column format codes.
-            # For each result-column format code:
-            #   Int16 - The format code.
-            ps['bind_1'] = statement_name_bin + h_pack(len(params)) + \
-                pack("!" + "h" * len(param_fcs), *param_fcs) + \
-                h_pack(len(params))
-
-            ps['bind_2'] = h_pack(len(output_fc)) + \
-                pack("!" + "h" * len(output_fc), *output_fc)
-
-            cache['ps'][key] = ps
-
-        cursor._cached_rows.clear()
-        cursor._row_count = -1
-        cursor.portal_name = "pg8000_portal_" + str(self.portal_number)
-        self.portal_number += 1
-        cursor.portal_name_bin = cursor.portal_name.encode('ascii') + NULL_BYTE
-        cursor.execute_msg = cursor.portal_name_bin + \
-            Connection._row_cache_size_bin
+    def send_DESCRIBE_STATEMENT(self, statement_name_bin):
+        self._send_message(DESCRIBE, STATEMENT + statement_name_bin)
+        self._write(FLUSH_MSG)
 
-        # Byte1('B') - Identifies the Bind command.
-        # Int32 - Message length, including self.
-        # String - Name of the destination portal.
-        # String - Name of the source prepared statement.
-        # Int16 - Number of parameter format codes.
-        # For each parameter format code:
-        #   Int16 - The parameter format code.
-        # Int16 - Number of parameter values.
-        # For each parameter value:
-        #   Int32 - The length of the parameter value, in bytes, not
-        #           including this length.  -1 indicates a NULL parameter
-        #           value, in which no value bytes follow.
-        #   Byte[n] - Value of the parameter.
-        # Int16 - The number of result-column format codes.
-        # For each result-column format code:
-        #   Int16 - The format code.
-        retval = bytearray(cursor.portal_name_bin + ps['bind_1'])
-        for value, send_func in zip(args, ps['param_funcs']):
-            if value is None:
-                val = NULL
+    def send_QUERY(self, sql):
+        self._send_message(QUERY, sql.encode(self._client_encoding) + NULL_BYTE)
+
+    def execute_simple(self, statement):
+        context = Context()
+
+        self.send_QUERY(statement)
+        self._flush()
+        self.handle_messages(context)
+
+        return context
+
+    def execute_unnamed(self, statement, vals=(), oids=(), stream=None):
+        context = Context(stream=stream)
+
+        self.send_PARSE(NULL_BYTE, statement, oids)
+        self._write(SYNC_MSG)
+        self._flush()
+        self.handle_messages(context)
+        self.send_DESCRIBE_STATEMENT(NULL_BYTE)
+
+        self._write(SYNC_MSG)
+
+        try:
+            self._flush()
+        except AttributeError as e:
+            if self._sock is None:
+                raise InterfaceError("connection is closed")
             else:
-                val = send_func(value)
-                retval.extend(i_pack(len(val)))
-            retval.extend(val)
-        retval.extend(ps['bind_2'])
+                raise e
+        params = make_params(self.py_types, vals)
+        self.send_BIND(NULL_BYTE, params)
+        self.handle_messages(context)
+        self.send_EXECUTE()
 
-        self._send_message(BIND, retval)
-        self.send_EXECUTE(cursor)
         self._write(SYNC_MSG)
         self._flush()
-        self.handle_messages(cursor)
-        if cursor.portal_suspended:
-            if self.autocommit:
-                raise InterfaceError(
-                    "With autocommit on, it's not possible to retrieve more "
-                    "rows than the pg8000 cache size, as the portal is closed "
-                    "when the transaction is closed.")
+        self.handle_messages(context)
 
-        else:
-            self.close_portal(cursor)
+        return context
+
+    def prepare_statement(self, statement, oids=None):
+
+        for i in count():
+            statement_name = f"pg8000_statement_{i}"
+            statement_name_bin = statement_name.encode("ascii") + NULL_BYTE
+            if statement_name_bin not in self._statement_nums:
+                self._statement_nums.add(statement_name_bin)
+                break
+
+        self.send_PARSE(statement_name_bin, statement, oids)
+        self.send_DESCRIBE_STATEMENT(statement_name_bin)
+        self._write(SYNC_MSG)
+
+        try:
+            self._flush()
+        except AttributeError as e:
+            if self._sock is None:
+                raise InterfaceError("connection is closed")
+            else:
+                raise e
+
+        context = Context()
+        self.handle_messages(context)
+
+        return statement_name_bin, context.columns, context.input_funcs
+
+    def execute_named(self, statement_name_bin, params, columns, input_funcs):
+        context = Context(columns=columns, input_funcs=input_funcs)
+
+        self.send_BIND(statement_name_bin, params)
+        self.send_EXECUTE()
+        self._write(SYNC_MSG)
+        self._flush()
+        self.handle_messages(context)
+        return context
 
     def _send_message(self, code, data):
         try:
             self._write(code)
             self._write(i_pack(len(data) + 4))
             self._write(data)
-            self._write(FLUSH_MSG)
         except ValueError as e:
             if str(e) == "write to closed file":
                 raise InterfaceError("connection is closed")
@@ -2033,475 +699,110 @@ class Connection(object):
         except AttributeError:
             raise InterfaceError("connection is closed")
 
-    def send_EXECUTE(self, cursor):
-        # Byte1('E') - Identifies the message as an execute message.
-        # Int32 -   Message length, including self.
-        # String -  The name of the portal to execute.
-        # Int32 -   Maximum number of rows to return, if portal
-        #           contains a query # that returns rows.
-        #           0 = no limit.
-        cursor.portal_suspended = False
-        self._send_message(EXECUTE, cursor.execute_msg)
+    def send_BIND(self, statement_name_bin, params):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
+
+        retval = bytearray(
+            NULL_BYTE + statement_name_bin + h_pack(0) + h_pack(len(params))
+        )
 
-    def handle_NO_DATA(self, msg, ps):
+        for value in params:
+            if value is None:
+                retval.extend(i_pack(-1))
+            else:
+                val = value.encode(self._client_encoding)
+                retval.extend(i_pack(len(val)))
+                retval.extend(val)
+        retval.extend(h_pack(0))
+
+        self._send_message(BIND, retval)
+        self._write(FLUSH_MSG)
+
+    def send_EXECUTE(self):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
+        self._write(EXECUTE_MSG)
+        self._write(FLUSH_MSG)
+
+    def handle_NO_DATA(self, msg, context):
         pass
 
-    def handle_COMMAND_COMPLETE(self, data, cursor):
-        values = data[:-1].split(BINARY_SPACE)
-        command = values[0]
-        if command in self._commands_with_count:
+    def handle_COMMAND_COMPLETE(self, data, context):
+        values = data[:-1].split(b" ")
+        try:
             row_count = int(values[-1])
-            if cursor._row_count == -1:
-                cursor._row_count = row_count
+            if context.row_count == -1:
+                context.row_count = row_count
             else:
-                cursor._row_count += row_count
-
-        if command in DDL_COMMANDS:
-            for k in self._caches:
-                self._caches[k]['ps'].clear()
+                context.row_count += row_count
+        except ValueError:
+            pass
 
-    def handle_DATA_ROW(self, data, cursor):
-        data_idx = 2
+    def handle_DATA_ROW(self, data, context):
+        idx = 2
         row = []
-        for func in cursor.ps['input_funcs']:
-            vlen = i_unpack(data, data_idx)[0]
-            data_idx += 4
+        for func in context.input_funcs:
+            vlen = i_unpack(data, idx)[0]
+            idx += 4
             if vlen == -1:
-                row.append(None)
+                v = None
             else:
-                row.append(func(data, data_idx, vlen))
-                data_idx += vlen
-        cursor._cached_rows.append(row)
+                v = func(str(data[idx : idx + vlen], encoding=self._client_encoding))
+                idx += vlen
+            row.append(v)
+        context.rows.append(row)
 
-    def handle_messages(self, cursor):
+    def handle_messages(self, context):
         code = self.error = None
 
-        try:
-            while code != READY_FOR_QUERY:
+        while code != READY_FOR_QUERY:
+
+            try:
                 code, data_len = ci_unpack(self._read(5))
-                self.message_types[code](self._read(data_len - 4), cursor)
-        except:
-            self._close()
-            raise
+            except struct.error as e:
+                raise InterfaceError("network error on read") from e
+
+            self.message_types[code](self._read(data_len - 4), context)
 
         if self.error is not None:
             raise self.error
 
-    # Byte1('C') - Identifies the message as a close command.
-    # Int32 - Message length, including self.
-    # Byte1 - 'S' for prepared statement, 'P' for portal.
-    # String - The name of the item to close.
-    def close_portal(self, cursor):
-        self._send_message(CLOSE, PORTAL + cursor.portal_name_bin)
+    def close_prepared_statement(self, statement_name_bin):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
+        self._send_message(CLOSE, STATEMENT + statement_name_bin)
+        self._write(FLUSH_MSG)
         self._write(SYNC_MSG)
         self._flush()
-        self.handle_messages(cursor)
+        context = Context()
+        self.handle_messages(context)
+        self._statement_nums.remove(statement_name_bin)
+
+    def handle_NOTICE_RESPONSE(self, data, context):
+        """https://www.postgresql.org/docs/current/protocol-message-formats.html"""
+        self.notices.append(dict((s[0:1], s[1:]) for s in data.split(NULL_BYTE)))
 
-    # Byte1('N') - Identifier
-    # Int32 - Message length
-    # Any number of these, followed by a zero byte:
-    #   Byte1 - code identifying the field type (see responseKeys)
-    #   String - field value
-    def handle_NOTICE_RESPONSE(self, data, ps):
-        resp = dict((s[0:1], s[1:]) for s in data.split(NULL_BYTE))
-        self.NoticeReceived(resp)
-
-    def handle_PARAMETER_STATUS(self, data, ps):
+    def handle_PARAMETER_STATUS(self, data, context):
         pos = data.find(NULL_BYTE)
-        key, value = data[:pos], data[pos + 1:-1]
-        if key == b("client_encoding"):
+        key, value = data[:pos], data[pos + 1 : -1]
+        self.parameter_statuses.append((key, value))
+        if key == b"client_encoding":
             encoding = value.decode("ascii").lower()
-            self._client_encoding = pg_to_py_encodings.get(encoding, encoding)
-
-        elif key == b("integer_datetimes"):
-            if value == b('on'):
-
-                self.py_types[1114] = (1114, FC_BINARY, timestamp_send_integer)
-                self.pg_types[1114] = (FC_BINARY, timestamp_recv_integer)
-
-                self.py_types[1184] = (
-                    1184, FC_BINARY, timestamptz_send_integer)
-                self.pg_types[1184] = (FC_BINARY, timestamptz_recv_integer)
-
-                self.py_types[Interval] = (
-                    1186, FC_BINARY, interval_send_integer)
-                self.py_types[datetime.timedelta] = (
-                    1186, FC_BINARY, interval_send_integer)
-                self.pg_types[1186] = (FC_BINARY, interval_recv_integer)
-            else:
-                self.py_types[1114] = (1114, FC_BINARY, timestamp_send_float)
-                self.pg_types[1114] = (FC_BINARY, timestamp_recv_float)
-                self.py_types[1184] = (1184, FC_BINARY, timestamptz_send_float)
-                self.pg_types[1184] = (FC_BINARY, timestamptz_recv_float)
-
-                self.py_types[Interval] = (
-                    1186, FC_BINARY, interval_send_float)
-                self.py_types[datetime.timedelta] = (
-                    1186, FC_BINARY, interval_send_float)
-                self.pg_types[1186] = (FC_BINARY, interval_recv_float)
-
-        elif key == b("server_version"):
-            self._server_version = LooseVersion(value.decode('ascii'))
-            if self._server_version < LooseVersion('8.2.0'):
-                self._commands_with_count = (
-                    b("INSERT"), b("DELETE"), b("UPDATE"), b("MOVE"),
-                    b("FETCH"))
-            elif self._server_version < LooseVersion('9.0.0'):
-                self._commands_with_count = (
-                    b("INSERT"), b("DELETE"), b("UPDATE"), b("MOVE"),
-                    b("FETCH"), b("COPY"))
-
-    def array_inspect(self, value):
-        # Check if array has any values.  If not, we can't determine the proper
-        # array oid.
-        first_element = array_find_first_element(value)
-        if first_element is None:
-            raise ArrayContentEmptyError("array has no values")
-
-        # supported array output
-        typ = type(first_element)
-
-        if issubclass(typ, integer_types):
-            # special int array support -- send as smallest possible array type
-            typ = integer_types
-            int2_ok, int4_ok, int8_ok = True, True, True
-            for v in array_flatten(value):
-                if v is None:
-                    continue
-                if min_int2 < v < max_int2:
-                    continue
-                int2_ok = False
-                if min_int4 < v < max_int4:
-                    continue
-                int4_ok = False
-                if min_int8 < v < max_int8:
-                    continue
-                int8_ok = False
-            if int2_ok:
-                array_oid = 1005  # INT2[]
-                oid, fc, send_func = (21, FC_BINARY, h_pack)
-            elif int4_ok:
-                array_oid = 1007  # INT4[]
-                oid, fc, send_func = (23, FC_BINARY, i_pack)
-            elif int8_ok:
-                array_oid = 1016  # INT8[]
-                oid, fc, send_func = (20, FC_BINARY, q_pack)
-            else:
-                raise ArrayContentNotSupportedError(
-                    "numeric not supported as array contents")
-        else:
-            try:
-                oid, fc, send_func = self.make_params((first_element,))[0]
-
-                # If unknown, assume it's a string array
-                if oid == 705:
-                    oid = 25
-                    # Use binary ARRAY format to avoid having to properly
-                    # escape text in the array literals
-                    fc = FC_BINARY
-                array_oid = pg_array_types[oid]
-            except KeyError:
-                raise ArrayContentNotSupportedError(
-                    "oid " + str(oid) + " not supported as array contents")
-            except NotSupportedError:
-                raise ArrayContentNotSupportedError(
-                    "type " + str(typ) + " not supported as array contents")
-
-        if fc == FC_BINARY:
-            def send_array(arr):
-                # check for homogenous array
-                for a, i, v in walk_array(arr):
-                    if not isinstance(v, (typ, type(None))):
-                        raise ArrayContentNotHomogenousError(
-                            "not all array elements are of type " + str(typ))
-
-                # check that all array dimensions are consistent
-                array_check_dimensions(arr)
-
-                has_null = array_has_null(arr)
-                dim_lengths = array_dim_lengths(arr)
-                data = bytearray(iii_pack(len(dim_lengths), has_null, oid))
-                for i in dim_lengths:
-                    data.extend(ii_pack(i, 1))
-                for v in array_flatten(arr):
-                    if v is None:
-                        data += i_pack(-1)
-                    else:
-                        inner_data = send_func(v)
-                        data += i_pack(len(inner_data))
-                        data += inner_data
-                return data
-        else:
-            def send_array(arr):
-                for a, i, v in walk_array(arr):
-                    if not isinstance(v, (typ, type(None))):
-                        raise ArrayContentNotHomogenousError(
-                            "not all array elements are of type " + str(typ))
-                array_check_dimensions(arr)
-                ar = deepcopy(arr)
-                for a, i, v in walk_array(ar):
-                    if v is None:
-                        a[i] = 'NULL'
-                    else:
-                        a[i] = send_func(v).decode('ascii')
-
-                return u(str(ar)).translate(arr_trans).encode('ascii')
-        return (array_oid, fc, send_array)
-
-    def xid(self, format_id, global_transaction_id, branch_qualifier):
-        """Create a Transaction IDs (only global_transaction_id is used in pg)
-        format_id and branch_qualifier are not used in postgres
-        global_transaction_id may be any string identifier supported by
-        postgres returns a tuple
-        (format_id, global_transaction_id, branch_qualifier)"""
-        return (format_id, global_transaction_id, branch_qualifier)
-
-    def tpc_begin(self, xid):
-        """Begins a TPC transaction with the given transaction ID xid.
-
-        This method should be called outside of a transaction (i.e. nothing may
-        have executed since the last .commit() or .rollback()).
-
-        Furthermore, it is an error to call .commit() or .rollback() within the
-        TPC transaction. A ProgrammingError is raised, if the application calls
-        .commit() or .rollback() during an active TPC transaction.
-
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        self._xid = xid
-        if self.autocommit:
-            self.execute(self._cursor, "begin transaction", None)
-
-    def tpc_prepare(self):
-        """Performs the first phase of a transaction started with .tpc_begin().
-        A ProgrammingError is be raised if this method is called outside of a
-        TPC transaction.
-
-        After calling .tpc_prepare(), no statements can be executed until
-        .tpc_commit() or .tpc_rollback() have been called.
-
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        q = "PREPARE TRANSACTION '%s';" % (self._xid[1],)
-        self.execute(self._cursor, q, None)
+            self._client_encoding = PG_PY_ENCODINGS.get(encoding, encoding)
 
-    def tpc_commit(self, xid=None):
-        """When called with no arguments, .tpc_commit() commits a TPC
-        transaction previously prepared with .tpc_prepare().
-
-        If .tpc_commit() is called prior to .tpc_prepare(), a single phase
-        commit is performed. A transaction manager may choose to do this if
-        only a single resource is participating in the global transaction.
-
-        When called with a transaction ID xid, the database commits the given
-        transaction. If an invalid transaction ID is provided, a
-        ProgrammingError will be raised. This form should be called outside of
-        a transaction, and is intended for use in recovery.
+        elif key == b"integer_datetimes":
+            if value == b"on":
+                pass
 
-        On return, the TPC transaction is ended.
-
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        if xid is None:
-            xid = self._xid
-
-        if xid is None:
-            raise ProgrammingError(
-                "Cannot tpc_commit() without a TPC transaction!")
-
-        try:
-            previous_autocommit_mode = self.autocommit
-            self.autocommit = True
-            if xid in self.tpc_recover():
-                self.execute(
-                    self._cursor, "COMMIT PREPARED '%s';" % (xid[1], ),
-                    None)
             else:
-                # a single-phase commit
-                self.commit()
-        finally:
-            self.autocommit = previous_autocommit_mode
-        self._xid = None
+                pass
 
-    def tpc_rollback(self, xid=None):
-        """When called with no arguments, .tpc_rollback() rolls back a TPC
-        transaction. It may be called before or after .tpc_prepare().
-
-        When called with a transaction ID xid, it rolls back the given
-        transaction. If an invalid transaction ID is provided, a
-        ProgrammingError is raised. This form should be called outside of a
-        transaction, and is intended for use in recovery.
-
-        On return, the TPC transaction is ended.
-
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        if xid is None:
-            xid = self._xid
-
-        if xid is None:
-            raise ProgrammingError(
-                "Cannot tpc_rollback() without a TPC prepared transaction!")
-
-        try:
-            previous_autocommit_mode = self.autocommit
-            self.autocommit = True
-            if xid in self.tpc_recover():
-                # a two-phase rollback
-                self.execute(
-                    self._cursor, "ROLLBACK PREPARED '%s';" % (xid[1],),
-                    None)
-            else:
-                # a single-phase rollback
-                self.rollback()
-        finally:
-            self.autocommit = previous_autocommit_mode
-        self._xid = None
-
-    def tpc_recover(self):
-        """Returns a list of pending transaction IDs suitable for use with
-        .tpc_commit(xid) or .tpc_rollback(xid).
-
-        This function is part of the `DBAPI 2.0 specification
-        <http://www.python.org/dev/peps/pep-0249/>`_.
-        """
-        try:
-            previous_autocommit_mode = self.autocommit
-            self.autocommit = True
-            curs = self.cursor()
-            curs.execute("select gid FROM pg_prepared_xacts")
-            return [self.xid(0, row[0], '') for row in curs]
-        finally:
-            self.autocommit = previous_autocommit_mode
-
-# pg element oid -> pg array typeoid
-pg_array_types = {
-    16: 1000,
-    25: 1009,    # TEXT[]
-    701: 1022,
-    1700: 1231,  # NUMERIC[]
-}
-
-
-# PostgreSQL encodings:
-#   http://www.postgresql.org/docs/8.3/interactive/multibyte.html
-# Python encodings:
-#   http://www.python.org/doc/2.4/lib/standard-encodings.html
-#
-# Commented out encodings don't require a name change between PostgreSQL and
-# Python.  If the py side is None, then the encoding isn't supported.
-pg_to_py_encodings = {
-    # Not supported:
-    "mule_internal": None,
-    "euc_tw": None,
-
-    # Name fine as-is:
-    # "euc_jp",
-    # "euc_jis_2004",
-    # "euc_kr",
-    # "gb18030",
-    # "gbk",
-    # "johab",
-    # "sjis",
-    # "shift_jis_2004",
-    # "uhc",
-    # "utf8",
-
-    # Different name:
-    "euc_cn": "gb2312",
-    "iso_8859_5": "is8859_5",
-    "iso_8859_6": "is8859_6",
-    "iso_8859_7": "is8859_7",
-    "iso_8859_8": "is8859_8",
-    "koi8": "koi8_r",
-    "latin1": "iso8859-1",
-    "latin2": "iso8859_2",
-    "latin3": "iso8859_3",
-    "latin4": "iso8859_4",
-    "latin5": "iso8859_9",
-    "latin6": "iso8859_10",
-    "latin7": "iso8859_13",
-    "latin8": "iso8859_14",
-    "latin9": "iso8859_15",
-    "sql_ascii": "ascii",
-    "win866": "cp886",
-    "win874": "cp874",
-    "win1250": "cp1250",
-    "win1251": "cp1251",
-    "win1252": "cp1252",
-    "win1253": "cp1253",
-    "win1254": "cp1254",
-    "win1255": "cp1255",
-    "win1256": "cp1256",
-    "win1257": "cp1257",
-    "win1258": "cp1258",
-    "unicode": "utf-8",  # Needed for Amazon Redshift
-}
-
-
-def walk_array(arr):
-    for i, v in enumerate(arr):
-        if isinstance(v, list):
-            for a, i2, v2 in walk_array(v):
-                yield a, i2, v2
-        else:
-            yield arr, i, v
-
-
-def array_find_first_element(arr):
-    for v in array_flatten(arr):
-        if v is not None:
-            return v
-    return None
-
-
-def array_flatten(arr):
-    for v in arr:
-        if isinstance(v, list):
-            for v2 in array_flatten(v):
-                yield v2
-        else:
-            yield v
+        elif key == b"server_version":
+            pass
 
 
-def array_check_dimensions(arr):
-    v0 = arr[0]
-    if isinstance(v0, list):
-        req_len = len(v0)
-        req_inner_lengths = array_check_dimensions(v0)
-        for v in arr:
-            inner_lengths = array_check_dimensions(v)
-            if len(v) != req_len or inner_lengths != req_inner_lengths:
-                raise ArrayDimensionsNotConsistentError(
-                    "array dimensions not consistent")
-        retval = [req_len]
-        retval.extend(req_inner_lengths)
-        return retval
-    else:
-        # make sure nothing else at this level is a list
-        for v in arr:
-            if isinstance(v, list):
-                raise ArrayDimensionsNotConsistentError(
-                    "array dimensions not consistent")
-        return []
-
-
-def array_has_null(arr):
-    for v in array_flatten(arr):
-        if v is None:
-            return True
-    return False
-
-
-def array_dim_lengths(arr):
-    v0 = arr[0]
-    if isinstance(v0, list):
-        retval = [len(v0)]
-        retval.extend(array_dim_lengths(v0))
-    else:
-        return [len(arr)]
-    return retval
+class Context:
+    def __init__(self, stream=None, columns=None, input_funcs=None):
+        self.rows = None if columns is None else []
+        self.row_count = -1
+        self.columns = columns
+        self.stream = stream
+        self.input_funcs = [] if input_funcs is None else input_funcs
diff -pruN 1.10.6-3/pg8000/dbapi.py 1.23.0-0ubuntu1/pg8000/dbapi.py
--- 1.10.6-3/pg8000/dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/dbapi.py	2021-11-13 10:03:15.000000000 +0000
@@ -0,0 +1,953 @@
+from datetime import date as Date, datetime as Datetime, time as Time
+from itertools import count, islice
+from time import localtime
+from warnings import warn
+
+import pg8000
+from pg8000.converters import (
+    BIGINT,
+    BOOLEAN,
+    BOOLEAN_ARRAY,
+    BYTES,
+    CHAR,
+    CHAR_ARRAY,
+    DATE,
+    FLOAT,
+    FLOAT_ARRAY,
+    INET,
+    INT2VECTOR,
+    INTEGER,
+    INTEGER_ARRAY,
+    INTERVAL,
+    JSON,
+    JSONB,
+    MACADDR,
+    NAME,
+    NAME_ARRAY,
+    NULLTYPE,
+    NUMERIC,
+    NUMERIC_ARRAY,
+    OID,
+    PGInterval,
+    STRING,
+    TEXT,
+    TEXT_ARRAY,
+    TIME,
+    TIMESTAMP,
+    TIMESTAMPTZ,
+    UNKNOWN,
+    UUID_TYPE,
+    VARCHAR,
+    VARCHAR_ARRAY,
+    XID,
+)
+from pg8000.core import Context, CoreConnection
+from pg8000.exceptions import DatabaseError, Error, InterfaceError
+
+
+from ._version import get_versions
+
+__version__ = get_versions()["version"]
+del get_versions
+
+# Copyright (c) 2007-2009, Mathieu Fenniak
+# Copyright (c) The Contributors
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__author__ = "Mathieu Fenniak"
+
+
+ROWID = OID
+
+apilevel = "2.0"
+"""The DBAPI level supported, currently "2.0".
+
+This property is part of the `DBAPI 2.0 specification
+<http://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+threadsafety = 1
+"""Integer constant stating the level of thread safety the DBAPI interface
+supports. This DBAPI module supports sharing of the module only. Connections
+and cursors may not be shared between threads. This gives pg8000 a threadsafety
+value of 1.
+
+This property is part of the `DBAPI 2.0 specification
+<http://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+paramstyle = "format"
+
+
+BINARY = bytes
+
+
+def PgDate(year, month, day):
+    """Construct an object holding a date value.
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    :rtype: :class:`datetime.date`
+    """
+    return Date(year, month, day)
+
+
+def PgTime(hour, minute, second):
+    """Construct an object holding a time value.
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    :rtype: :class:`datetime.time`
+    """
+    return Time(hour, minute, second)
+
+
+def Timestamp(year, month, day, hour, minute, second):
+    """Construct an object holding a timestamp value.
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    :rtype: :class:`datetime.datetime`
+    """
+    return Datetime(year, month, day, hour, minute, second)
+
+
+def DateFromTicks(ticks):
+    """Construct an object holding a date value from the given ticks value
+    (number of seconds since the epoch).
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    :rtype: :class:`datetime.date`
+    """
+    return Date(*localtime(ticks)[:3])
+
+
+def TimeFromTicks(ticks):
+    """Construct an object holding a time value from the given ticks value
+    (number of seconds since the epoch).
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    :rtype: :class:`datetime.time`
+    """
+    return Time(*localtime(ticks)[3:6])
+
+
+def TimestampFromTicks(ticks):
+    """Construct an object holding a timestamp value from the given ticks value
+    (number of seconds since the epoch).
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    :rtype: :class:`datetime.datetime`
+    """
+    return Timestamp(*localtime(ticks)[:6])
+
+
+def Binary(value):
+    """Construct an object holding binary data.
+
+    This function is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+
+    """
+    return value
+
+
+def connect(
+    user,
+    host="localhost",
+    database=None,
+    port=5432,
+    password=None,
+    source_address=None,
+    unix_sock=None,
+    ssl_context=None,
+    timeout=None,
+    tcp_keepalive=True,
+    application_name=None,
+    replication=None,
+):
+
+    return Connection(
+        user,
+        host=host,
+        database=database,
+        port=port,
+        password=password,
+        source_address=source_address,
+        unix_sock=unix_sock,
+        ssl_context=ssl_context,
+        timeout=timeout,
+        tcp_keepalive=tcp_keepalive,
+        application_name=application_name,
+        replication=replication,
+    )
+
+
+apilevel = "2.0"
+"""The DBAPI level supported, currently "2.0".
+
+This property is part of the `DBAPI 2.0 specification
+<http://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+threadsafety = 1
+"""Integer constant stating the level of thread safety the DBAPI interface
+supports. This DBAPI module supports sharing of the module only. Connections
+and cursors may not be shared between threads. This gives pg8000 a threadsafety
+value of 1.
+
+This property is part of the `DBAPI 2.0 specification
+<http://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+paramstyle = "format"
+
+
+def convert_paramstyle(style, query, args):
+    # I don't see any way to avoid scanning the query string char by char,
+    # so we might as well take that careful approach and create a
+    # state-based scanner.  We'll use int variables for the state.
+    OUTSIDE = 0  # outside quoted string
+    INSIDE_SQ = 1  # inside single-quote string '...'
+    INSIDE_QI = 2  # inside quoted identifier   "..."
+    INSIDE_ES = 3  # inside escaped single-quote string, E'...'
+    INSIDE_PN = 4  # inside parameter name eg. :name
+    INSIDE_CO = 5  # inside inline comment eg. --
+
+    in_quote_escape = False
+    in_param_escape = False
+    placeholders = []
+    output_query = []
+    param_idx = map(lambda x: "$" + str(x), count(1))
+    state = OUTSIDE
+    prev_c = None
+    for i, c in enumerate(query):
+        if i + 1 < len(query):
+            next_c = query[i + 1]
+        else:
+            next_c = None
+
+        if state == OUTSIDE:
+            if c == "'":
+                output_query.append(c)
+                if prev_c == "E":
+                    state = INSIDE_ES
+                else:
+                    state = INSIDE_SQ
+            elif c == '"':
+                output_query.append(c)
+                state = INSIDE_QI
+            elif c == "-":
+                output_query.append(c)
+                if prev_c == "-":
+                    state = INSIDE_CO
+            elif style == "qmark" and c == "?":
+                output_query.append(next(param_idx))
+            elif (
+                style == "numeric" and c == ":" and next_c not in ":=" and prev_c != ":"
+            ):
+                # Treat : as beginning of parameter name if and only
+                # if it's the only : around
+                # Needed to properly process type conversions
+                # i.e. sum(x)::float
+                output_query.append("$")
+            elif style == "named" and c == ":" and next_c not in ":=" and prev_c != ":":
+                # Same logic for : as in numeric parameters
+                state = INSIDE_PN
+                placeholders.append("")
+            elif style == "pyformat" and c == "%" and next_c == "(":
+                state = INSIDE_PN
+                placeholders.append("")
+            elif style in ("format", "pyformat") and c == "%":
+                style = "format"
+                if in_param_escape:
+                    in_param_escape = False
+                    output_query.append(c)
+                else:
+                    if next_c == "%":
+                        in_param_escape = True
+                    elif next_c == "s":
+                        state = INSIDE_PN
+                        output_query.append(next(param_idx))
+                    else:
+                        raise InterfaceError(
+                            "Only %s and %% are supported in the query."
+                        )
+            else:
+                output_query.append(c)
+
+        elif state == INSIDE_SQ:
+            if c == "'":
+                if in_quote_escape:
+                    in_quote_escape = False
+                else:
+                    if next_c == "'":
+                        in_quote_escape = True
+                    else:
+                        state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_QI:
+            if c == '"':
+                state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_ES:
+            if c == "'" and prev_c != "\\":
+                # check for escaped single-quote
+                state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_PN:
+            if style == "named":
+                placeholders[-1] += c
+                if next_c is None or (not next_c.isalnum() and next_c != "_"):
+                    state = OUTSIDE
+                    try:
+                        pidx = placeholders.index(placeholders[-1], 0, -1)
+                        output_query.append("$" + str(pidx + 1))
+                        del placeholders[-1]
+                    except ValueError:
+                        output_query.append("$" + str(len(placeholders)))
+            elif style == "pyformat":
+                if prev_c == ")" and c == "s":
+                    state = OUTSIDE
+                    try:
+                        pidx = placeholders.index(placeholders[-1], 0, -1)
+                        output_query.append("$" + str(pidx + 1))
+                        del placeholders[-1]
+                    except ValueError:
+                        output_query.append("$" + str(len(placeholders)))
+                elif c in "()":
+                    pass
+                else:
+                    placeholders[-1] += c
+            elif style == "format":
+                state = OUTSIDE
+
+        elif state == INSIDE_CO:
+            output_query.append(c)
+            if c == "\n":
+                state = OUTSIDE
+
+        prev_c = c
+
+    if style in ("numeric", "qmark", "format"):
+        vals = args
+    else:
+        vals = tuple(args[p] for p in placeholders)
+
+    return "".join(output_query), vals
+
+
+class Cursor:
+    def __init__(self, connection):
+        self._c = connection
+        self.arraysize = 1
+
+        self._context = None
+        self._row_iter = None
+
+        self._input_oids = ()
+
+    @property
+    def connection(self):
+        warn("DB-API extension cursor.connection used", stacklevel=3)
+        return self._c
+
+    @property
+    def rowcount(self):
+        context = self._context
+        if context is None:
+            return -1
+
+        return context.row_count
+
+    @property
+    def description(self):
+        context = self._context
+        if context is None:
+            return None
+
+        row_desc = context.columns
+        if row_desc is None:
+            return None
+        if len(row_desc) == 0:
+            return None
+        columns = []
+        for col in row_desc:
+            columns.append((col["name"], col["type_oid"], None, None, None, None, None))
+        return columns
+
+    ##
+    # Executes a database operation.  Parameters may be provided as a sequence
+    # or mapping and will be bound to variables in the operation.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    def execute(self, operation, args=(), stream=None):
+        """Executes a database operation.  Parameters may be provided as a
+        sequence, or as a mapping, depending upon the value of
+        :data:`pg8000.paramstyle`.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :param operation:
+            The SQL statement to execute.
+
+        :param args:
+            If :data:`paramstyle` is ``qmark``, ``numeric``, or ``format``,
+            this argument should be an array of parameters to bind into the
+            statement.  If :data:`paramstyle` is ``named``, the argument should
+            be a dict mapping of parameters.  If the :data:`paramstyle` is
+            ``pyformat``, the argument value may be either an array or a
+            mapping.
+
+        :param stream: This is a pg8000 extension for use with the PostgreSQL
+            `COPY
+            <http://www.postgresql.org/docs/current/static/sql-copy.html>`_
+            command. For a COPY FROM the parameter must be a readable file-like
+            object, and for COPY TO it must be writable.
+
+            .. versionadded:: 1.9.11
+        """
+        try:
+            if not self._c.in_transaction and not self._c.autocommit:
+                self._c.execute_simple("begin transaction")
+
+            if len(args) == 0 and stream is None:
+                self._context = self._c.execute_simple(operation)
+            else:
+                statement, vals = convert_paramstyle(paramstyle, operation, args)
+                self._context = self._c.execute_unnamed(
+                    statement, vals=vals, oids=self._input_oids, stream=stream
+                )
+
+            if self._context.rows is None:
+                self._row_iter = None
+            else:
+                self._row_iter = iter(self._context.rows)
+            self._input_oids = ()
+        except AttributeError as e:
+            if self._c is None:
+                raise InterfaceError("Cursor closed")
+            elif self._c._sock is None:
+                raise InterfaceError("connection is closed")
+            else:
+                raise e
+
+        self.input_types = []
+
+    def executemany(self, operation, param_sets):
+        """Prepare a database operation, and then execute it against all
+        parameter sequences or mappings provided.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :param operation:
+            The SQL statement to execute
+        :param parameter_sets:
+            A sequence of parameters to execute the statement with. The values
+            in the sequence should be sequences or mappings of parameters, the
+            same as the args argument of the :meth:`execute` method.
+        """
+        rowcounts = []
+        input_oids = self._input_oids
+        for parameters in param_sets:
+            self._input_oids = input_oids
+            self.execute(operation, parameters)
+            rowcounts.append(self._context.row_count)
+
+        if len(rowcounts) == 0:
+            self._context = Context()
+        elif -1 in rowcounts:
+            self._context.row_count = -1
+        else:
+            self._context.row_count = sum(rowcounts)
+
+    def callproc(self, procname, parameters=None):
+        args = [] if parameters is None else parameters
+        operation = f"CALL {procname}(" + ", ".join(["%s" for _ in args]) + ")"
+
+        try:
+
+            statement, vals = convert_paramstyle("format", operation, args)
+
+            self._context = self._c.execute_unnamed(statement, vals=vals)
+
+            if self._context.rows is None:
+                self._row_iter = None
+            else:
+                self._row_iter = iter(self._context.rows)
+
+        except AttributeError as e:
+            if self._c is None:
+                raise InterfaceError("Cursor closed")
+            elif self._c._sock is None:
+                raise InterfaceError("connection is closed")
+            else:
+                raise e
+
+    def fetchone(self):
+        """Fetch the next row of a query result set.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :returns:
+            A row as a sequence of field values, or ``None`` if no more rows
+            are available.
+        """
+        try:
+            return next(self)
+        except StopIteration:
+            return None
+        except TypeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+
+    def __iter__(self):
+        """A cursor object is iterable to retrieve the rows from a query.
+
+        This is a DBAPI 2.0 extension.
+        """
+        return self
+
+    def __next__(self):
+        try:
+            return next(self._row_iter)
+        except AttributeError:
+            if self._context is None:
+                raise ProgrammingError("A query hasn't been issued.")
+            else:
+                raise
+        except StopIteration as e:
+            if self._context is None:
+                raise ProgrammingError("A query hasn't been issued.")
+            elif len(self._context.columns) == 0:
+                raise ProgrammingError("no result set")
+            else:
+                raise e
+
+    def fetchmany(self, num=None):
+        """Fetches the next set of rows of a query result.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :param size:
+
+            The number of rows to fetch when called.  If not provided, the
+            :attr:`arraysize` attribute value is used instead.
+
+        :returns:
+
+            A sequence, each entry of which is a sequence of field values
+            making up a row.  If no more rows are available, an empty sequence
+            will be returned.
+        """
+        try:
+            return tuple(islice(self, self.arraysize if num is None else num))
+        except TypeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+
+    def fetchall(self):
+        """Fetches all remaining rows of a query result.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :returns:
+
+            A sequence, each entry of which is a sequence of field values
+            making up a row.
+        """
+        try:
+            return tuple(self)
+        except TypeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+
+    def close(self):
+        """Closes the cursor.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self._c = None
+
+    def setinputsizes(self, *sizes):
+        """This method is part of the `DBAPI 2.0 specification"""
+        oids = []
+        for size in sizes:
+            if isinstance(size, int):
+                oid = size
+            else:
+                try:
+                    oid, _ = self._c.py_types[size]
+                except KeyError:
+                    oid = pg8000.converters.UNKNOWN
+            oids.append(oid)
+
+        self._input_oids = oids
+
+    def setoutputsize(self, size, column=None):
+        """This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_, however, it is not
+        implemented by pg8000.
+        """
+        pass
+
+
+class Connection(CoreConnection):
+
+    # DBAPI Extension: supply exceptions as attributes on the connection
+    Warning = property(lambda self: self._getError(Warning))
+    Error = property(lambda self: self._getError(Error))
+    InterfaceError = property(lambda self: self._getError(InterfaceError))
+    DatabaseError = property(lambda self: self._getError(DatabaseError))
+    OperationalError = property(lambda self: self._getError(OperationalError))
+    IntegrityError = property(lambda self: self._getError(IntegrityError))
+    InternalError = property(lambda self: self._getError(InternalError))
+    ProgrammingError = property(lambda self: self._getError(ProgrammingError))
+    NotSupportedError = property(lambda self: self._getError(NotSupportedError))
+
+    def _getError(self, error):
+        warn(f"DB-API extension connection.{error.__name__} used", stacklevel=3)
+        return error
+
+    def cursor(self):
+        """Creates a :class:`Cursor` object bound to this
+        connection.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        return Cursor(self)
+
+    def commit(self):
+        """Commits the current database transaction.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self.execute_unnamed("commit")
+
+    def rollback(self):
+        """Rolls back the current database transaction.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        if not self.in_transaction:
+            return
+        self.execute_unnamed("rollback")
+
+    def xid(self, format_id, global_transaction_id, branch_qualifier):
+        """Create a Transaction IDs (only global_transaction_id is used in pg)
+        format_id and branch_qualifier are not used in postgres
+        global_transaction_id may be any string identifier supported by
+        postgres returns a tuple
+        (format_id, global_transaction_id, branch_qualifier)"""
+        return (format_id, global_transaction_id, branch_qualifier)
+
+    def tpc_begin(self, xid):
+        """Begins a TPC transaction with the given transaction ID xid.
+
+        This method should be called outside of a transaction (i.e. nothing may
+        have executed since the last .commit() or .rollback()).
+
+        Furthermore, it is an error to call .commit() or .rollback() within the
+        TPC transaction. A ProgrammingError is raised, if the application calls
+        .commit() or .rollback() during an active TPC transaction.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self._xid = xid
+        if self.autocommit:
+            self.execute_unnamed("begin transaction")
+
+    def tpc_prepare(self):
+        """Performs the first phase of a transaction started with .tpc_begin().
+        A ProgrammingError is raised if this method is called outside of a
+        TPC transaction.
+
+        After calling .tpc_prepare(), no statements can be executed until
+        .tpc_commit() or .tpc_rollback() have been called.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self.execute_unnamed("PREPARE TRANSACTION '%s';" % (self._xid[1],))
+
+    def tpc_commit(self, xid=None):
+        """When called with no arguments, .tpc_commit() commits a TPC
+        transaction previously prepared with .tpc_prepare().
+
+        If .tpc_commit() is called prior to .tpc_prepare(), a single phase
+        commit is performed. A transaction manager may choose to do this if
+        only a single resource is participating in the global transaction.
+
+        When called with a transaction ID xid, the database commits the given
+        transaction. If an invalid transaction ID is provided, a
+        ProgrammingError will be raised. This form should be called outside of
+        a transaction, and is intended for use in recovery.
+
+        On return, the TPC transaction is ended.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        if xid is None:
+            xid = self._xid
+
+        if xid is None:
+            raise ProgrammingError("Cannot tpc_commit() without a TPC transaction!")
+
+        try:
+            previous_autocommit_mode = self.autocommit
+            self.autocommit = True
+            if xid in self.tpc_recover():
+                self.execute_unnamed("COMMIT PREPARED '%s';" % (xid[1],))
+            else:
+                # a single-phase commit
+                self.commit()
+        finally:
+            self.autocommit = previous_autocommit_mode
+        self._xid = None
+
+    def tpc_rollback(self, xid=None):
+        """When called with no arguments, .tpc_rollback() rolls back a TPC
+        transaction. It may be called before or after .tpc_prepare().
+
+        When called with a transaction ID xid, it rolls back the given
+        transaction. If an invalid transaction ID is provided, a
+        ProgrammingError is raised. This form should be called outside of a
+        transaction, and is intended for use in recovery.
+
+        On return, the TPC transaction is ended.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        if xid is None:
+            xid = self._xid
+
+        if xid is None:
+            raise ProgrammingError(
+                "Cannot tpc_rollback() without a TPC prepared transaction!"
+            )
+
+        try:
+            previous_autocommit_mode = self.autocommit
+            self.autocommit = True
+            if xid in self.tpc_recover():
+                # a two-phase rollback
+                self.execute_unnamed("ROLLBACK PREPARED '%s';" % (xid[1],))
+            else:
+                # a single-phase rollback
+                self.rollback()
+        finally:
+            self.autocommit = previous_autocommit_mode
+        self._xid = None
+
+    def tpc_recover(self):
+        """Returns a list of pending transaction IDs suitable for use with
+        .tpc_commit(xid) or .tpc_rollback(xid).
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        try:
+            previous_autocommit_mode = self.autocommit
+            self.autocommit = True
+            curs = self.cursor()
+            curs.execute("select gid FROM pg_prepared_xacts")
+            return [self.xid(0, row[0], "") for row in curs.fetchall()]
+        finally:
+            self.autocommit = previous_autocommit_mode
+
+
+class Warning(Exception):
+    """Generic exception raised for important database warnings like data
+    truncations.  This exception is not currently used by pg8000.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class DataError(DatabaseError):
+    """Generic exception raised for errors that are due to problems with the
+    processed data.  This exception is not currently raised by pg8000.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class OperationalError(DatabaseError):
+    """
+    Generic exception raised for errors that are related to the database's
+    operation and not necessarily under the control of the programmer. This
+    exception is currently never raised by pg8000.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class IntegrityError(DatabaseError):
+    """
+    Generic exception raised when the relational integrity of the database is
+    affected.  This exception is not currently raised by pg8000.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class InternalError(DatabaseError):
+    """Generic exception raised when the database encounters an internal error.
+    This is currently only raised when unexpected state occurs in the pg8000
+    interface itself, and is typically the result of a interface bug.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class ProgrammingError(DatabaseError):
+    """Generic exception raised for programming errors.  For example, this
+    exception is raised if more parameter fields are in a query string than
+    there are available parameters.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class NotSupportedError(DatabaseError):
+    """Generic exception raised in case a method or database API was used which
+    is not supported by the database.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class ArrayContentNotSupportedError(NotSupportedError):
+    """
+    Raised when attempting to transmit an array where the base type is not
+    supported for binary data transfer by the interface.
+    """
+
+    pass
+
+
+__all__ = [
+    "BIGINT",
+    "BINARY",
+    "BOOLEAN",
+    "BOOLEAN_ARRAY",
+    "BYTES",
+    "Binary",
+    "CHAR",
+    "CHAR_ARRAY",
+    "Connection",
+    "Cursor",
+    "DATE",
+    "DataError",
+    "DatabaseError",
+    "Date",
+    "DateFromTicks",
+    "Error",
+    "FLOAT",
+    "FLOAT_ARRAY",
+    "INET",
+    "INT2VECTOR",
+    "INTEGER",
+    "INTEGER_ARRAY",
+    "INTERVAL",
+    "IntegrityError",
+    "InterfaceError",
+    "InternalError",
+    "JSON",
+    "JSONB",
+    "MACADDR",
+    "NAME",
+    "NAME_ARRAY",
+    "NULLTYPE",
+    "NUMERIC",
+    "NUMERIC_ARRAY",
+    "NotSupportedError",
+    "OID",
+    "OperationalError",
+    "PGInterval",
+    "ProgrammingError",
+    "ROWID",
+    "STRING",
+    "TEXT",
+    "TEXT_ARRAY",
+    "TIME",
+    "TIMESTAMP",
+    "TIMESTAMPTZ",
+    "Time",
+    "TimeFromTicks",
+    "Timestamp",
+    "TimestampFromTicks",
+    "UNKNOWN",
+    "UUID_TYPE",
+    "VARCHAR",
+    "VARCHAR_ARRAY",
+    "Warning",
+    "XID",
+    "connect",
+]
diff -pruN 1.10.6-3/pg8000/exceptions.py 1.23.0-0ubuntu1/pg8000/exceptions.py
--- 1.10.6-3/pg8000/exceptions.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/exceptions.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,32 @@
+class Error(Exception):
+    """Generic exception that is the base exception of all other error
+    exceptions.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class InterfaceError(Error):
+    """Generic exception raised for errors that are related to the database
+    interface rather than the database itself.  For example, if the interface
+    attempts to use an SSL connection but the server refuses, an InterfaceError
+    will be raised.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
+
+
+class DatabaseError(Error):
+    """Generic exception raised for errors that are related to the database.
+
+    This exception is part of the `DBAPI 2.0 specification
+    <http://www.python.org/dev/peps/pep-0249/>`_.
+    """
+
+    pass
diff -pruN 1.10.6-3/pg8000/__init__.py 1.23.0-0ubuntu1/pg8000/__init__.py
--- 1.10.6-3/pg8000/__init__.py	2016-06-10 09:43:57.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/__init__.py	2021-07-30 12:10:36.000000000 +0000
@@ -1,15 +1,75 @@
-from .core import (
-    Warning, Bytea, DataError, DatabaseError, InterfaceError, ProgrammingError,
-    Error, OperationalError, IntegrityError, InternalError, NotSupportedError,
-    ArrayContentNotHomogenousError, ArrayContentEmptyError,
-    ArrayDimensionsNotConsistentError, ArrayContentNotSupportedError, utc,
-    Connection, Cursor, Binary, Date, DateFromTicks, Time, TimeFromTicks,
-    Timestamp, TimestampFromTicks, BINARY, Interval)
+from pg8000.legacy import (
+    BIGINTEGER,
+    BINARY,
+    BOOLEAN,
+    BOOLEAN_ARRAY,
+    BYTES,
+    Binary,
+    CHAR,
+    CHAR_ARRAY,
+    Connection,
+    Cursor,
+    DATE,
+    DATETIME,
+    DECIMAL,
+    DECIMAL_ARRAY,
+    DataError,
+    DatabaseError,
+    Date,
+    DateFromTicks,
+    Error,
+    FLOAT,
+    FLOAT_ARRAY,
+    INET,
+    INT2VECTOR,
+    INTEGER,
+    INTEGER_ARRAY,
+    INTERVAL,
+    IntegrityError,
+    InterfaceError,
+    InternalError,
+    JSON,
+    JSONB,
+    MACADDR,
+    NAME,
+    NAME_ARRAY,
+    NULLTYPE,
+    NUMBER,
+    NotSupportedError,
+    OID,
+    OperationalError,
+    PGInterval,
+    ProgrammingError,
+    ROWID,
+    STRING,
+    TEXT,
+    TEXT_ARRAY,
+    TIME,
+    TIMEDELTA,
+    TIMESTAMP,
+    TIMESTAMPTZ,
+    Time,
+    TimeFromTicks,
+    Timestamp,
+    TimestampFromTicks,
+    UNKNOWN,
+    UUID_TYPE,
+    VARCHAR,
+    VARCHAR_ARRAY,
+    Warning,
+    XID,
+    pginterval_in,
+    pginterval_out,
+    timedelta_in,
+)
+
 from ._version import get_versions
-__version__ = get_versions()['version']
+
+__version__ = get_versions()["version"]
 del get_versions
 
 # Copyright (c) 2007-2009, Mathieu Fenniak
+# Copyright (c) The Contributors
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -40,69 +100,35 @@ __author__ = "Mathieu Fenniak"
 
 
 def connect(
-        user=None, host='localhost', unix_sock=None, port=5432, database=None,
-        password=None, ssl=False, timeout=None, **kwargs):
-    """Creates a connection to a PostgreSQL database.
-
-    This function is part of the `DBAPI 2.0 specification
-    <http://www.python.org/dev/peps/pep-0249/>`_; however, the arguments of the
-    function are not defined by the specification.
-
-    :param user:
-        The username to connect to the PostgreSQL server with.
-
-        If your server character encoding is not ``ascii`` or ``utf8``, then
-        you need to provide ``user`` as bytes, eg.
-        ``"my_name".encode('EUC-JP')``.
-
-    :keyword host:
-        The hostname of the PostgreSQL server to connect with.  Providing this
-        parameter is necessary for TCP/IP connections.  One of either ``host``
-        or ``unix_sock`` must be provided. The default is ``localhost``.
-
-    :keyword unix_sock:
-        The path to the UNIX socket to access the database through, for
-        example, ``'/tmp/.s.PGSQL.5432'``.  One of either ``host`` or
-        ``unix_sock`` must be provided.
-
-    :keyword port:
-        The TCP/IP port of the PostgreSQL server instance.  This parameter
-        defaults to ``5432``, the registered common port of PostgreSQL TCP/IP
-        servers.
-
-    :keyword database:
-        The name of the database instance to connect with.  This parameter is
-        optional; if omitted, the PostgreSQL server will assume the database
-        name is the same as the username.
-
-        If your server character encoding is not ``ascii`` or ``utf8``, then
-        you need to provide ``database`` as bytes, eg.
-        ``"my_db".encode('EUC-JP')``.
-
-    :keyword password:
-        The user password to connect to the server with.  This parameter is
-        optional; if omitted and the database server requests password-based
-        authentication, the connection will fail to open.  If this parameter
-        is provided but not requested by the server, no error will occur.
-
-        If your server character encoding is not ``ascii`` or ``utf8``, then
-        you need to provide ``user`` as bytes, eg.
-        ``"my_password".encode('EUC-JP')``.
-
-    :keyword ssl:
-        Use SSL encryption for TCP/IP sockets if ``True``.  Defaults to
-        ``False``.
-
-    :keyword timeout:
-        Only used with Python 3, this is the time in seconds before the
-        connection to the database will time out. The default is ``None`` which
-        means no timeout.
-
-    :rtype:
-        A :class:`Connection` object.
-    """
+    user,
+    host="localhost",
+    database=None,
+    port=5432,
+    password=None,
+    source_address=None,
+    unix_sock=None,
+    ssl_context=None,
+    timeout=None,
+    tcp_keepalive=True,
+    application_name=None,
+    replication=None,
+):
+
     return Connection(
-        user, host, unix_sock, port, database, password, ssl, timeout)
+        user,
+        host=host,
+        database=database,
+        port=port,
+        password=password,
+        source_address=source_address,
+        unix_sock=unix_sock,
+        ssl_context=ssl_context,
+        timeout=timeout,
+        tcp_keepalive=tcp_keepalive,
+        application_name=application_name,
+        replication=replication,
+    )
+
 
 apilevel = "2.0"
 """The DBAPI level supported, currently "2.0".
@@ -111,65 +137,81 @@ This property is part of the `DBAPI 2.0
 <http://www.python.org/dev/peps/pep-0249/>`_.
 """
 
-threadsafety = 3
+threadsafety = 1
 """Integer constant stating the level of thread safety the DBAPI interface
-supports.  This DBAPI module supports sharing the module, connections, and
-cursors, resulting in a threadsafety value of 3.
-
-This property is part of the `DBAPI 2.0 specification
-<http://www.python.org/dev/peps/pep-0249/>`_.
-"""
-
-paramstyle = 'format'
-"""String property stating the type of parameter marker formatting expected by
-the interface.  This value defaults to "format", in which parameters are
-marked in this format: "WHERE name=%s".
+supports. This DBAPI module supports sharing of the module only. Connections
+and cursors may not be shared between threads. This gives pg8000 a threadsafety
+value of 1.
 
 This property is part of the `DBAPI 2.0 specification
 <http://www.python.org/dev/peps/pep-0249/>`_.
-
-As an extension to the DBAPI specification, this value is not constant; it
-can be changed to any of the following values:
-
-    qmark
-        Question mark style, eg. ``WHERE name=?``
-    numeric
-        Numeric positional style, eg. ``WHERE name=:1``
-    named
-        Named style, eg. ``WHERE name=:paramname``
-    format
-        printf format codes, eg. ``WHERE name=%s``
-    pyformat
-        Python format codes, eg. ``WHERE name=%(paramname)s``
 """
 
-# I have no idea what this would be used for by a client app.  Should it be
-# TEXT, VARCHAR, CHAR?  It will only compare against row_description's
-# type_code if it is this one type.  It is the varchar type oid for now, this
-# appears to match expectations in the DB API 2.0 compliance test suite.
-
-STRING = 1043
-"""String type oid."""
-
-
-NUMBER = 1700
-"""Numeric type oid"""
+paramstyle = "format"
 
-DATETIME = 1114
-"""Timestamp type oid"""
-
-ROWID = 26
-"""ROWID type oid"""
 
 __all__ = [
-    Warning, Bytea, DataError, DatabaseError, connect, InterfaceError,
-    ProgrammingError, Error, OperationalError, IntegrityError, InternalError,
-    NotSupportedError, ArrayContentNotHomogenousError, ArrayContentEmptyError,
-    ArrayDimensionsNotConsistentError, ArrayContentNotSupportedError, utc,
-    Connection, Cursor, Binary, Date, DateFromTicks, Time, TimeFromTicks,
-    Timestamp, TimestampFromTicks, BINARY, Interval]
-
-"""Version string for pg8000.
-
-    .. versionadded:: 1.9.11
-"""
+    "BIGINTEGER",
+    "BINARY",
+    "BOOLEAN",
+    "BOOLEAN_ARRAY",
+    "BYTES",
+    "Binary",
+    "CHAR",
+    "CHAR_ARRAY",
+    "Connection",
+    "Cursor",
+    "DATE",
+    "DATETIME",
+    "DECIMAL",
+    "DECIMAL_ARRAY",
+    "DataError",
+    "DatabaseError",
+    "Date",
+    "DateFromTicks",
+    "Error",
+    "FLOAT",
+    "FLOAT_ARRAY",
+    "INET",
+    "INT2VECTOR",
+    "INTEGER",
+    "INTEGER_ARRAY",
+    "INTERVAL",
+    "IntegrityError",
+    "InterfaceError",
+    "InternalError",
+    "JSON",
+    "JSONB",
+    "MACADDR",
+    "NAME",
+    "NAME_ARRAY",
+    "NULLTYPE",
+    "NUMBER",
+    "NotSupportedError",
+    "OID",
+    "OperationalError",
+    "PGInterval",
+    "ProgrammingError",
+    "ROWID",
+    "STRING",
+    "TEXT",
+    "TEXT_ARRAY",
+    "TIME",
+    "TIMEDELTA",
+    "TIMESTAMP",
+    "TIMESTAMPTZ",
+    "Time",
+    "TimeFromTicks",
+    "Timestamp",
+    "TimestampFromTicks",
+    "UNKNOWN",
+    "UUID_TYPE",
+    "VARCHAR",
+    "VARCHAR_ARRAY",
+    "Warning",
+    "XID",
+    "connect",
+    "pginterval_in",
+    "pginterval_out",
+    "timedelta_in",
+]
diff -pruN 1.10.6-3/pg8000/legacy.py 1.23.0-0ubuntu1/pg8000/legacy.py
--- 1.10.6-3/pg8000/legacy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/legacy.py	2021-11-13 10:05:54.000000000 +0000
@@ -0,0 +1,816 @@
+from datetime import date as Date, time as Time
+from itertools import islice
+from warnings import warn
+
+import pg8000
+from pg8000.converters import (
+    BIGINT,
+    BOOLEAN,
+    BOOLEAN_ARRAY,
+    BYTES,
+    CHAR,
+    CHAR_ARRAY,
+    DATE,
+    FLOAT,
+    FLOAT_ARRAY,
+    INET,
+    INT2VECTOR,
+    INTEGER,
+    INTEGER_ARRAY,
+    INTERVAL,
+    JSON,
+    JSONB,
+    MACADDR,
+    NAME,
+    NAME_ARRAY,
+    NULLTYPE,
+    NUMERIC,
+    NUMERIC_ARRAY,
+    OID,
+    PGInterval,
+    PY_PG,
+    STRING,
+    TEXT,
+    TEXT_ARRAY,
+    TIME,
+    TIMESTAMP,
+    TIMESTAMPTZ,
+    UNKNOWN,
+    UUID_TYPE,
+    VARCHAR,
+    VARCHAR_ARRAY,
+    XID,
+    interval_in as timedelta_in,
+    make_params,
+    pg_interval_in as pginterval_in,
+    pg_interval_out as pginterval_out,
+)
+from pg8000.core import Context, CoreConnection
+from pg8000.dbapi import (
+    BINARY,
+    Binary,
+    DataError,
+    DateFromTicks,
+    IntegrityError,
+    InternalError,
+    NotSupportedError,
+    OperationalError,
+    ProgrammingError,
+    TimeFromTicks,
+    Timestamp,
+    TimestampFromTicks,
+    Warning,
+    convert_paramstyle,
+)
+from pg8000.exceptions import DatabaseError, Error, InterfaceError
+
+
+from ._version import get_versions
+
+__version__ = get_versions()["version"]
+del get_versions
+
+# Copyright (c) 2007-2009, Mathieu Fenniak
+# Copyright (c) The Contributors
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__author__ = "Mathieu Fenniak"
+
+
+BIGINTEGER = BIGINT
+DATETIME = TIMESTAMP
+NUMBER = DECIMAL = NUMERIC
+DECIMAL_ARRAY = NUMERIC_ARRAY
+ROWID = OID
+TIMEDELTA = INTERVAL
+
+
+def connect(
+    user,
+    host="localhost",
+    database=None,
+    port=5432,
+    password=None,
+    source_address=None,
+    unix_sock=None,
+    ssl_context=None,
+    timeout=None,
+    tcp_keepalive=True,
+    application_name=None,
+    replication=None,
+):
+
+    return Connection(
+        user,
+        host=host,
+        database=database,
+        port=port,
+        password=password,
+        source_address=source_address,
+        unix_sock=unix_sock,
+        ssl_context=ssl_context,
+        timeout=timeout,
+        tcp_keepalive=tcp_keepalive,
+        application_name=application_name,
+        replication=replication,
+    )
+
+
+apilevel = "2.0"
+"""The DBAPI level supported, currently "2.0".
+
+This property is part of the `DBAPI 2.0 specification
+<http://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+threadsafety = 1
+"""Integer constant stating the level of thread safety the DBAPI interface
+supports. This DBAPI module supports sharing of the module only. Connections
+and cursors may not be shared between threads. This gives pg8000 a threadsafety
+value of 1.
+
+This property is part of the `DBAPI 2.0 specification
+<http://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+paramstyle = "format"
+
+
+class Cursor:
+    def __init__(self, connection, paramstyle=None):
+        self._c = connection
+        self.arraysize = 1
+        if paramstyle is None:
+            self.paramstyle = pg8000.paramstyle
+        else:
+            self.paramstyle = paramstyle
+
+        self._context = None
+        self._row_iter = None
+
+        self._input_oids = ()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
+
+    @property
+    def connection(self):
+        warn("DB-API extension cursor.connection used", stacklevel=3)
+        return self._c
+
+    @property
+    def rowcount(self):
+        context = self._context
+        if context is None:
+            return -1
+
+        return context.row_count
+
+    description = property(lambda self: self._getDescription())
+
+    def _getDescription(self):
+        context = self._context
+        if context is None:
+            return None
+        row_desc = context.columns
+        if row_desc is None:
+            return None
+        if len(row_desc) == 0:
+            return None
+        columns = []
+        for col in row_desc:
+            columns.append((col["name"], col["type_oid"], None, None, None, None, None))
+        return columns
+
+    ##
+    # Executes a database operation.  Parameters may be provided as a sequence
+    # or mapping and will be bound to variables in the operation.
+    # <p>
+    # Stability: Part of the DBAPI 2.0 specification.
+    def execute(self, operation, args=(), stream=None):
+        """Executes a database operation.  Parameters may be provided as a
+        sequence, or as a mapping, depending upon the value of
+        :data:`pg8000.paramstyle`.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :param operation:
+            The SQL statement to execute.
+
+        :param args:
+            If :data:`paramstyle` is ``qmark``, ``numeric``, or ``format``,
+            this argument should be an array of parameters to bind into the
+            statement.  If :data:`paramstyle` is ``named``, the argument should
+            be a dict mapping of parameters.  If the :data:`paramstyle` is
+            ``pyformat``, the argument value may be either an array or a
+            mapping.
+
+        :param stream: This is a pg8000 extension for use with the PostgreSQL
+            `COPY
+            <http://www.postgresql.org/docs/current/static/sql-copy.html>`_
+            command. For a COPY FROM the parameter must be a readable file-like
+            object, and for COPY TO it must be writable.
+
+            .. versionadded:: 1.9.11
+        """
+        try:
+            if not self._c.in_transaction and not self._c.autocommit:
+                self._c.execute_simple("begin transaction")
+
+            if len(args) == 0 and stream is None:
+                self._context = self._c.execute_simple(operation)
+            else:
+                statement, vals = convert_paramstyle(self.paramstyle, operation, args)
+                self._context = self._c.execute_unnamed(
+                    statement, vals=vals, oids=self._input_oids, stream=stream
+                )
+
+            rows = [] if self._context.rows is None else self._context.rows
+            self._row_iter = iter(rows)
+
+            self._input_oids = ()
+        except AttributeError as e:
+            if self._c is None:
+                raise InterfaceError("Cursor closed")
+            elif self._c._sock is None:
+                raise InterfaceError("connection is closed")
+            else:
+                raise e
+        except DatabaseError as e:
+            msg = e.args[0]
+            if isinstance(msg, dict):
+                response_code = msg["C"]
+
+                if response_code == "28000":
+                    cls = InterfaceError
+                elif response_code == "23505":
+                    cls = IntegrityError
+                else:
+                    cls = ProgrammingError
+
+                raise cls(msg)
+            else:
+                raise ProgrammingError(msg)
+
+        self.input_types = []
+        return self
+
+    def executemany(self, operation, param_sets):
+        """Prepare a database operation, and then execute it against all
+        parameter sequences or mappings provided.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :param operation:
+            The SQL statement to execute
+        :param parameter_sets:
+            A sequence of parameters to execute the statement with. The values
+            in the sequence should be sequences or mappings of parameters, the
+            same as the args argument of the :meth:`execute` method.
+        """
+        rowcounts = []
+        input_oids = self._input_oids
+        for parameters in param_sets:
+            self._input_oids = input_oids
+            self.execute(operation, parameters)
+            rowcounts.append(self._context.row_count)
+
+        if len(rowcounts) == 0:
+            self._context = Context()
+        elif -1 in rowcounts:
+            self._context.row_count = -1
+        else:
+            self._context.row_count = sum(rowcounts)
+
+        return self
+
+    def fetchone(self):
+        """Fetch the next row of a query result set.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :returns:
+            A row as a sequence of field values, or ``None`` if no more rows
+            are available.
+        """
+        try:
+            return next(self)
+        except StopIteration:
+            return None
+        except TypeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+        except AttributeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+
+    def fetchmany(self, num=None):
+        """Fetches the next set of rows of a query result.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :param size:
+
+            The number of rows to fetch when called.  If not provided, the
+            :attr:`arraysize` attribute value is used instead.
+
+        :returns:
+
+            A sequence, each entry of which is a sequence of field values
+            making up a row.  If no more rows are available, an empty sequence
+            will be returned.
+        """
+        try:
+            return tuple(islice(self, self.arraysize if num is None else num))
+        except TypeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+
+    def fetchall(self):
+        """Fetches all remaining rows of a query result.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+
+        :returns:
+
+            A sequence, each entry of which is a sequence of field values
+            making up a row.
+        """
+        try:
+            return tuple(self)
+        except TypeError:
+            raise ProgrammingError("attempting to use unexecuted cursor")
+
+    def close(self):
+        """Closes the cursor.
+
+        This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self._c = None
+
+    def __iter__(self):
+        """A cursor object is iterable to retrieve the rows from a query.
+
+        This is a DBAPI 2.0 extension.
+        """
+        return self
+
+    def setinputsizes(self, *sizes):
+        """This method is part of the `DBAPI 2.0 specification"""
+        oids = []
+        for size in sizes:
+            if isinstance(size, int):
+                oid = size
+            else:
+                try:
+                    oid = PY_PG[size]
+                except KeyError:
+                    oid = UNKNOWN
+            oids.append(oid)
+
+        self._input_oids = oids
+
+    def setoutputsize(self, size, column=None):
+        """This method is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_, however, it is not
+        implemented by pg8000.
+        """
+        pass
+
+    def __next__(self):
+        try:
+            return next(self._row_iter)
+        except AttributeError:
+            if self._context is None:
+                raise ProgrammingError("A query hasn't been issued.")
+            else:
+                raise
+        except StopIteration as e:
+            if self._context is None:
+                raise ProgrammingError("A query hasn't been issued.")
+            elif len(self._context.columns) == 0:
+                raise ProgrammingError("no result set")
+            else:
+                raise e
+
+
+class Connection(CoreConnection):
+
+    # DBAPI Extension: supply exceptions as attributes on the connection
+    Warning = property(lambda self: self._getError(Warning))
+    Error = property(lambda self: self._getError(Error))
+    InterfaceError = property(lambda self: self._getError(InterfaceError))
+    DatabaseError = property(lambda self: self._getError(DatabaseError))
+    OperationalError = property(lambda self: self._getError(OperationalError))
+    IntegrityError = property(lambda self: self._getError(IntegrityError))
+    InternalError = property(lambda self: self._getError(InternalError))
+    ProgrammingError = property(lambda self: self._getError(ProgrammingError))
+    NotSupportedError = property(lambda self: self._getError(NotSupportedError))
+
+    def __init__(self, *args, **kwargs):
+        try:
+            super().__init__(*args, **kwargs)
+        except DatabaseError as e:
+            msg = e.args[0]
+            if isinstance(msg, dict):
+                response_code = msg["C"]
+
+                if response_code == "28000":
+                    cls = InterfaceError
+                elif response_code == "23505":
+                    cls = IntegrityError
+                else:
+                    cls = ProgrammingError
+
+                raise cls(msg)
+            else:
+                raise ProgrammingError(msg)
+
+        self._run_cursor = Cursor(self, paramstyle="named")
+
+    def _getError(self, error):
+        warn("DB-API extension connection.%s used" % error.__name__, stacklevel=3)
+        return error
+
+    def cursor(self):
+        """Creates a :class:`Cursor` object bound to this
+        connection.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        return Cursor(self)
+
+    @property
+    def description(self):
+        return self._run_cursor._getDescription()
+
+    def commit(self):
+        """Commits the current database transaction.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self.execute_unnamed("commit")
+
+    def rollback(self):
+        """Rolls back the current database transaction.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        if not self.in_transaction:
+            return
+        self.execute_unnamed("rollback")
+
+    def run(self, sql, stream=None, **params):
+        self._run_cursor.execute(sql, params, stream=stream)
+        if self._run_cursor._context.rows is None:
+            return tuple()
+        else:
+            return tuple(self._run_cursor._context.rows)
+
+    def prepare(self, operation):
+        return PreparedStatement(self, operation)
+
+    def xid(self, format_id, global_transaction_id, branch_qualifier):
+        """Create a Transaction ID (only global_transaction_id is used in pg)
+        format_id and branch_qualifier are not used in postgres
+        global_transaction_id may be any string identifier supported by
+        postgres returns a tuple
+        (format_id, global_transaction_id, branch_qualifier)"""
+        return (format_id, global_transaction_id, branch_qualifier)
+
+    def tpc_begin(self, xid):
+        """Begins a TPC transaction with the given transaction ID xid.
+
+        This method should be called outside of a transaction (i.e. nothing may
+        have executed since the last .commit() or .rollback()).
+
+        Furthermore, it is an error to call .commit() or .rollback() within the
+        TPC transaction. A ProgrammingError is raised, if the application calls
+        .commit() or .rollback() during an active TPC transaction.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        self._xid = xid
+        if self.autocommit:
+            self.execute_unnamed("begin transaction")
+
+    def tpc_prepare(self):
+        """Performs the first phase of a transaction started with .tpc_begin().
+        A ProgrammingError is raised if this method is called outside of a
+        TPC transaction.
+
+        After calling .tpc_prepare(), no statements can be executed until
+        .tpc_commit() or .tpc_rollback() have been called.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        q = "PREPARE TRANSACTION '%s';" % (self._xid[1],)
+        self.execute_unnamed(q)
+
+    def tpc_commit(self, xid=None):
+        """When called with no arguments, .tpc_commit() commits a TPC
+        transaction previously prepared with .tpc_prepare().
+
+        If .tpc_commit() is called prior to .tpc_prepare(), a single phase
+        commit is performed. A transaction manager may choose to do this if
+        only a single resource is participating in the global transaction.
+
+        When called with a transaction ID xid, the database commits the given
+        transaction. If an invalid transaction ID is provided, a
+        ProgrammingError will be raised. This form should be called outside of
+        a transaction, and is intended for use in recovery.
+
+        On return, the TPC transaction is ended.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        if xid is None:
+            xid = self._xid
+
+        if xid is None:
+            raise ProgrammingError("Cannot tpc_commit() without a TPC transaction!")
+
+        try:
+            previous_autocommit_mode = self.autocommit
+            self.autocommit = True
+            if xid in self.tpc_recover():
+                self.execute_unnamed("COMMIT PREPARED '%s';" % (xid[1],))
+            else:
+                # a single-phase commit
+                self.commit()
+        finally:
+            self.autocommit = previous_autocommit_mode
+        self._xid = None
+
+    def tpc_rollback(self, xid=None):
+        """When called with no arguments, .tpc_rollback() rolls back a TPC
+        transaction. It may be called before or after .tpc_prepare().
+
+        When called with a transaction ID xid, it rolls back the given
+        transaction. If an invalid transaction ID is provided, a
+        ProgrammingError is raised. This form should be called outside of a
+        transaction, and is intended for use in recovery.
+
+        On return, the TPC transaction is ended.
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        if xid is None:
+            xid = self._xid
+
+        if xid is None:
+            raise ProgrammingError(
+                "Cannot tpc_rollback() without a TPC prepared transaction!"
+            )
+
+        try:
+            previous_autocommit_mode = self.autocommit
+            self.autocommit = True
+            if xid in self.tpc_recover():
+                # a two-phase rollback
+                self.execute_unnamed("ROLLBACK PREPARED '%s';" % (xid[1],))
+            else:
+                # a single-phase rollback
+                self.rollback()
+        finally:
+            self.autocommit = previous_autocommit_mode
+        self._xid = None
+
+    def tpc_recover(self):
+        """Returns a list of pending transaction IDs suitable for use with
+        .tpc_commit(xid) or .tpc_rollback(xid).
+
+        This function is part of the `DBAPI 2.0 specification
+        <http://www.python.org/dev/peps/pep-0249/>`_.
+        """
+        try:
+            previous_autocommit_mode = self.autocommit
+            self.autocommit = True
+            curs = self.cursor()
+            curs.execute("select gid FROM pg_prepared_xacts")
+            return [self.xid(0, row[0], "") for row in curs]
+        finally:
+            self.autocommit = previous_autocommit_mode
+
+
+def to_statement(query):
+    OUTSIDE = 0  # outside quoted string
+    INSIDE_SQ = 1  # inside single-quote string '...'
+    INSIDE_QI = 2  # inside quoted identifier   "..."
+    INSIDE_ES = 3  # inside escaped single-quote string, E'...'
+    INSIDE_PN = 4  # inside parameter name eg. :name
+    INSIDE_CO = 5  # inside inline comment eg. --
+
+    in_quote_escape = False
+    placeholders = []
+    output_query = []
+    state = OUTSIDE
+    prev_c = None
+    for i, c in enumerate(query):
+        if i + 1 < len(query):
+            next_c = query[i + 1]
+        else:
+            next_c = None
+
+        if state == OUTSIDE:
+            if c == "'":
+                output_query.append(c)
+                if prev_c == "E":
+                    state = INSIDE_ES
+                else:
+                    state = INSIDE_SQ
+            elif c == '"':
+                output_query.append(c)
+                state = INSIDE_QI
+            elif c == "-":
+                output_query.append(c)
+                if prev_c == "-":
+                    state = INSIDE_CO
+            elif c == ":" and next_c not in ":=" and prev_c != ":":
+                state = INSIDE_PN
+                placeholders.append("")
+            else:
+                output_query.append(c)
+
+        elif state == INSIDE_SQ:
+            if c == "'":
+                if in_quote_escape:
+                    in_quote_escape = False
+                else:
+                    if next_c == "'":
+                        in_quote_escape = True
+                    else:
+                        state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_QI:
+            if c == '"':
+                state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_ES:
+            if c == "'" and prev_c != "\\":
+                # check for escaped single-quote
+                state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_PN:
+            placeholders[-1] += c
+            if next_c is None or (not next_c.isalnum() and next_c != "_"):
+                state = OUTSIDE
+                try:
+                    pidx = placeholders.index(placeholders[-1], 0, -1)
+                    output_query.append("$" + str(pidx + 1))
+                    del placeholders[-1]
+                except ValueError:
+                    output_query.append("$" + str(len(placeholders)))
+
+        elif state == INSIDE_CO:
+            output_query.append(c)
+            if c == "\n":
+                state = OUTSIDE
+
+        prev_c = c
+
+    def make_vals(args):
+        return tuple(args[p] for p in placeholders)
+
+    return "".join(output_query), make_vals
+
+
+class PreparedStatement:
+    def __init__(self, con, operation):
+        self.con = con
+        self.operation = operation
+        statement, self.make_args = to_statement(operation)
+        self.name_bin, self.row_desc, self.input_funcs = con.prepare_statement(
+            statement, ()
+        )
+
+    def run(self, **vals):
+
+        params = make_params(self.con.py_types, self.make_args(vals))
+
+        try:
+            if not self.con.in_transaction and not self.con.autocommit:
+                self.con.execute_unnamed("begin transaction")
+            self._context = self.con.execute_named(
+                self.name_bin, params, self.row_desc, self.input_funcs
+            )
+        except AttributeError as e:
+            if self.con is None:
+                raise InterfaceError("Cursor closed")
+            elif self.con._sock is None:
+                raise InterfaceError("connection is closed")
+            else:
+                raise e
+
+        return tuple() if self._context.rows is None else tuple(self._context.rows)
+
+    def close(self):
+        self.con.close_prepared_statement(self.name_bin)
+        self.con = None
+
+
+__all__ = [
+    "BIGINTEGER",
+    "BINARY",
+    "BOOLEAN",
+    "BOOLEAN_ARRAY",
+    "BYTES",
+    "Binary",
+    "CHAR",
+    "CHAR_ARRAY",
+    "Connection",
+    "Cursor",
+    "DATE",
+    "DATETIME",
+    "DECIMAL",
+    "DECIMAL_ARRAY",
+    "DataError",
+    "DatabaseError",
+    "Date",
+    "DateFromTicks",
+    "Error",
+    "FLOAT",
+    "FLOAT_ARRAY",
+    "INET",
+    "INT2VECTOR",
+    "INTEGER",
+    "INTEGER_ARRAY",
+    "INTERVAL",
+    "IntegrityError",
+    "InterfaceError",
+    "InternalError",
+    "JSON",
+    "JSONB",
+    "MACADDR",
+    "NAME",
+    "NAME_ARRAY",
+    "NULLTYPE",
+    "NUMBER",
+    "NotSupportedError",
+    "OID",
+    "OperationalError",
+    "PGInterval",
+    "ProgrammingError",
+    "ROWID",
+    "STRING",
+    "TEXT",
+    "TEXT_ARRAY",
+    "TIME",
+    "TIMEDELTA",
+    "TIMESTAMP",
+    "TIMESTAMPTZ",
+    "Time",
+    "TimeFromTicks",
+    "Timestamp",
+    "TimestampFromTicks",
+    "UNKNOWN",
+    "UUID_TYPE",
+    "VARCHAR",
+    "VARCHAR_ARRAY",
+    "Warning",
+    "XID",
+    "connect",
+    "pginterval_in",
+    "pginterval_out",
+    "timedelta_in",
+]
diff -pruN 1.10.6-3/pg8000/native.py 1.23.0-0ubuntu1/pg8000/native.py
--- 1.10.6-3/pg8000/native.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/native.py	2021-11-13 10:07:05.000000000 +0000
@@ -0,0 +1,274 @@
+from collections import defaultdict
+
+from pg8000.converters import (
+    BIGINT,
+    BOOLEAN,
+    BOOLEAN_ARRAY,
+    BYTES,
+    CHAR,
+    CHAR_ARRAY,
+    DATE,
+    FLOAT,
+    FLOAT_ARRAY,
+    INET,
+    INT2VECTOR,
+    INTEGER,
+    INTEGER_ARRAY,
+    INTERVAL,
+    JSON,
+    JSONB,
+    MACADDR,
+    NAME,
+    NAME_ARRAY,
+    NULLTYPE,
+    NUMERIC,
+    NUMERIC_ARRAY,
+    OID,
+    PGInterval,
+    STRING,
+    TEXT,
+    TEXT_ARRAY,
+    TIME,
+    TIMESTAMP,
+    TIMESTAMPTZ,
+    UNKNOWN,
+    UUID_TYPE,
+    VARCHAR,
+    VARCHAR_ARRAY,
+    XID,
+    make_params,
+)
+from pg8000.core import CoreConnection
+from pg8000.exceptions import DatabaseError, Error, InterfaceError
+
+
+# Copyright (c) 2007-2009, Mathieu Fenniak
+# Copyright (c) The Contributors
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+
+OUTSIDE = 0  # outside quoted string
+INSIDE_SQ = 1  # inside single-quote string '...'
+INSIDE_QI = 2  # inside quoted identifier   "..."
+INSIDE_ES = 3  # inside escaped single-quote string, E'...'
+INSIDE_PN = 4  # inside parameter name eg. :name
+INSIDE_CO = 5  # inside inline comment eg. --
+
+
+def to_statement(query):
+    in_quote_escape = False
+    placeholders = []
+    output_query = []
+    state = OUTSIDE
+    prev_c = None
+    for i, c in enumerate(query):
+        if i + 1 < len(query):
+            next_c = query[i + 1]
+        else:
+            next_c = None
+
+        if state == OUTSIDE:
+            if c == "'":
+                output_query.append(c)
+                if prev_c == "E":
+                    state = INSIDE_ES
+                else:
+                    state = INSIDE_SQ
+            elif c == '"':
+                output_query.append(c)
+                state = INSIDE_QI
+            elif c == "-":
+                output_query.append(c)
+                if prev_c == "-":
+                    state = INSIDE_CO
+            elif c == ":" and next_c not in ":=" and prev_c != ":":
+                state = INSIDE_PN
+                placeholders.append("")
+            else:
+                output_query.append(c)
+
+        elif state == INSIDE_SQ:
+            if c == "'":
+                if in_quote_escape:
+                    in_quote_escape = False
+                elif next_c == "'":
+                    in_quote_escape = True
+                else:
+                    state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_QI:
+            if c == '"':
+                state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_ES:
+            if c == "'" and prev_c != "\\":
+                # check for escaped single-quote
+                state = OUTSIDE
+            output_query.append(c)
+
+        elif state == INSIDE_PN:
+            placeholders[-1] += c
+            if next_c is None or (not next_c.isalnum() and next_c != "_"):
+                state = OUTSIDE
+                try:
+                    pidx = placeholders.index(placeholders[-1], 0, -1)
+                    output_query.append(f"${pidx + 1}")
+                    del placeholders[-1]
+                except ValueError:
+                    output_query.append(f"${len(placeholders)}")
+
+        elif state == INSIDE_CO:
+            output_query.append(c)
+            if c == "\n":
+                state = OUTSIDE
+
+        prev_c = c
+
+    for reserved in ("types", "stream"):
+        if reserved in placeholders:
+            raise InterfaceError(
+                f"The name '{reserved}' can't be used as a placeholder because it's "
+                f"used for another purpose."
+            )
+
+    def make_vals(args):
+        vals = []
+        for p in placeholders:
+            try:
+                vals.append(args[p])
+            except KeyError:
+                raise InterfaceError(
+                    f"There's a placeholder '{p}' in the query, but no matching "
+                    f"keyword argument."
+                )
+        return tuple(vals)
+
+    return "".join(output_query), make_vals
+
+
+class Connection(CoreConnection):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._context = None
+
+    @property
+    def columns(self):
+        context = self._context
+        if context is None:
+            return None
+        return context.columns
+
+    @property
+    def row_count(self):
+        context = self._context
+        if context is None:
+            return None
+        return context.row_count
+
+    def run(self, sql, stream=None, types=None, **params):
+        if len(params) == 0 and stream is None:
+            self._context = self.execute_simple(sql)
+        else:
+            statement, make_vals = to_statement(sql)
+            oids = () if types is None else make_vals(defaultdict(lambda: None, types))
+            self._context = self.execute_unnamed(
+                statement, make_vals(params), oids=oids, stream=stream
+            )
+        return self._context.rows
+
+    def prepare(self, sql):
+        return PreparedStatement(self, sql)
+
+
+class PreparedStatement:
+    def __init__(self, con, sql, types=None):
+        self.con = con
+        statement, self.make_vals = to_statement(sql)
+        oids = () if types is None else self.make_vals(defaultdict(lambda: None, types))
+        self.name_bin, self.cols, self.input_funcs = con.prepare_statement(
+            statement, oids
+        )
+
+    @property
+    def columns(self):
+        return self._context.columns
+
+    def run(self, stream=None, **params):
+        params = make_params(self.con.py_types, self.make_vals(params))
+
+        self._context = self.con.execute_named(
+            self.name_bin, params, self.cols, self.input_funcs
+        )
+
+        return self._context.rows
+
+    def close(self):
+        self.con.close_prepared_statement(self.name_bin)
+
+
+__all__ = [
+    "BIGINT",
+    "BOOLEAN",
+    "BOOLEAN_ARRAY",
+    "BYTES",
+    "CHAR",
+    "CHAR_ARRAY",
+    "DATE",
+    "DatabaseError",
+    "Error",
+    "FLOAT",
+    "FLOAT_ARRAY",
+    "INET",
+    "INT2VECTOR",
+    "INTEGER",
+    "INTEGER_ARRAY",
+    "INTERVAL",
+    "InterfaceError",
+    "JSON",
+    "JSONB",
+    "MACADDR",
+    "NAME",
+    "NAME_ARRAY",
+    "NULLTYPE",
+    "NUMERIC",
+    "NUMERIC_ARRAY",
+    "OID",
+    "PGInterval",
+    "STRING",
+    "TEXT",
+    "TEXT_ARRAY",
+    "TIME",
+    "TIMESTAMP",
+    "TIMESTAMPTZ",
+    "UNKNOWN",
+    "UUID_TYPE",
+    "VARCHAR",
+    "VARCHAR_ARRAY",
+    "XID",
+]
diff -pruN 1.10.6-3/pg8000/_version.py 1.23.0-0ubuntu1/pg8000/_version.py
--- 1.10.6-3/pg8000/_version.py	2016-06-10 10:37:25.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000/_version.py	2021-11-13 10:21:00.416994000 +0000
@@ -1,18 +1,18 @@
 
-# This file was generated by 'versioneer.py' (0.15) from
+# This file was generated by 'versioneer.py' (0.19) from
 # revision-control system data, or from the parent directory name of an
 # unpacked source archive. Distribution tarballs contain a pre-generated copy
 # of this file.
 
 import json
-import sys
 
 version_json = '''
 {
+ "date": "2021-11-13T10:19:53+0000",
  "dirty": false,
  "error": null,
- "full-revisionid": "4098abf6be90683ab10b7b080983ed6f08476485",
- "version": "1.10.6"
+ "full-revisionid": "a83e85b4a3493efe4a53fdbe142d53306c1f5622",
+ "version": "1.23.0"
 }
 '''  # END VERSION_JSON
 
diff -pruN 1.10.6-3/pg8000.egg-info/PKG-INFO 1.23.0-0ubuntu1/pg8000.egg-info/PKG-INFO
--- 1.10.6-3/pg8000.egg-info/PKG-INFO	2016-06-10 10:37:17.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000.egg-info/PKG-INFO	2021-11-13 10:21:00.000000000 +0000
@@ -1,8 +1,8 @@
-Metadata-Version: 1.1
+Metadata-Version: 1.2
 Name: pg8000
-Version: 1.10.6
+Version: 1.23.0
 Summary: PostgreSQL interface library
-Home-page: https://github.com/mfenniak/pg8000
+Home-page: https://github.com/tlocke/pg8000
 Author: Mathieu Fenniak
 Author-email: biziqe@mathieu.fenniak.net
 License: BSD
@@ -15,16 +15,16 @@ Description:
         pg8000's name comes from the belief that it is probably about the 8000th PostgreSQL interface for Python.
 Keywords: postgresql dbapi
 Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: BSD License
 Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: Jython
@@ -32,3 +32,4 @@ Classifier: Programming Language :: Pyth
 Classifier: Operating System :: OS Independent
 Classifier: Topic :: Database :: Front-Ends
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
diff -pruN 1.10.6-3/pg8000.egg-info/requires.txt 1.23.0-0ubuntu1/pg8000.egg-info/requires.txt
--- 1.10.6-3/pg8000.egg-info/requires.txt	2016-06-10 10:37:17.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000.egg-info/requires.txt	2021-11-13 10:21:00.000000000 +0000
@@ -1 +1 @@
-six>=1.10.0
+scramp>=1.4.1
diff -pruN 1.10.6-3/pg8000.egg-info/SOURCES.txt 1.23.0-0ubuntu1/pg8000.egg-info/SOURCES.txt
--- 1.10.6-3/pg8000.egg-info/SOURCES.txt	2016-06-10 10:37:21.000000000 +0000
+++ 1.23.0-0ubuntu1/pg8000.egg-info/SOURCES.txt	2021-11-13 10:21:00.000000000 +0000
@@ -1,21 +1,75 @@
 LICENSE
 MANIFEST.in
-README.creole
+README.adoc
 setup.cfg
 setup.py
 versioneer.py
-doc/Makefile
-doc/conf.py
-doc/dbapi.rst
-doc/index.rst
-doc/quickstart.rst
-doc/release_notes.rst
-doc/types.rst
 pg8000/__init__.py
 pg8000/_version.py
+pg8000/converters.py
 pg8000/core.py
+pg8000/dbapi.py
+pg8000/exceptions.py
+pg8000/legacy.py
+pg8000/native.py
 pg8000.egg-info/PKG-INFO
 pg8000.egg-info/SOURCES.txt
 pg8000.egg-info/dependency_links.txt
 pg8000.egg-info/requires.txt
-pg8000.egg-info/top_level.txt
\ No newline at end of file
+pg8000.egg-info/top_level.txt
+test/__init__.py
+test/dbapi/__init__.py
+test/dbapi/conftest.py
+test/dbapi/test_auth.py
+test/dbapi/test_benchmarks.py
+test/dbapi/test_connection.py
+test/dbapi/test_copy.py
+test/dbapi/test_dbapi.py
+test/dbapi/test_dbapi20.py
+test/dbapi/test_paramstyle.py
+test/dbapi/test_query.py
+test/dbapi/test_typeconversion.py
+test/dbapi/github-actions/__init__.py
+test/dbapi/github-actions/gss_dbapi.py
+test/dbapi/github-actions/md5_dbapi.py
+test/dbapi/github-actions/password_dbapi.py
+test/dbapi/github-actions/scram-sha-256_dbapi.py
+test/dbapi/github-actions/ssl_dbapi.py
+test/legacy/__init__.py
+test/legacy/conftest.py
+test/legacy/stress.py
+test/legacy/test_auth.py
+test/legacy/test_benchmarks.py
+test/legacy/test_connection.py
+test/legacy/test_copy.py
+test/legacy/test_dbapi.py
+test/legacy/test_dbapi20.py
+test/legacy/test_error_recovery.py
+test/legacy/test_paramstyle.py
+test/legacy/test_prepared_statement.py
+test/legacy/test_query.py
+test/legacy/test_typeconversion.py
+test/legacy/test_typeobjects.py
+test/legacy/github-actions/__init__.py
+test/legacy/github-actions/gss_legacy.py
+test/legacy/github-actions/md5_legacy.py
+test/legacy/github-actions/password_legacy.py
+test/legacy/github-actions/scram-sha-256_legacy.py
+test/legacy/github-actions/ssl_legacy.py
+test/native/__init__.py
+test/native/conftest.py
+test/native/test_auth.py
+test/native/test_benchmarks.py
+test/native/test_connection.py
+test/native/test_converters.py
+test/native/test_copy.py
+test/native/test_core.py
+test/native/test_prepared_statement.py
+test/native/test_query.py
+test/native/test_typeconversion.py
+test/native/github-actions/__init__.py
+test/native/github-actions/gss_native.py
+test/native/github-actions/md5_native.py
+test/native/github-actions/password_native.py
+test/native/github-actions/scram-sha-256_native.py
+test/native/github-actions/ssl_native.py
\ No newline at end of file
diff -pruN 1.10.6-3/PKG-INFO 1.23.0-0ubuntu1/PKG-INFO
--- 1.10.6-3/PKG-INFO	2016-06-10 10:37:25.000000000 +0000
+++ 1.23.0-0ubuntu1/PKG-INFO	2021-11-13 10:21:00.416994000 +0000
@@ -1,8 +1,8 @@
-Metadata-Version: 1.1
+Metadata-Version: 1.2
 Name: pg8000
-Version: 1.10.6
+Version: 1.23.0
 Summary: PostgreSQL interface library
-Home-page: https://github.com/mfenniak/pg8000
+Home-page: https://github.com/tlocke/pg8000
 Author: Mathieu Fenniak
 Author-email: biziqe@mathieu.fenniak.net
 License: BSD
@@ -15,16 +15,16 @@ Description:
         pg8000's name comes from the belief that it is probably about the 8000th PostgreSQL interface for Python.
 Keywords: postgresql dbapi
 Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: BSD License
 Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: Jython
@@ -32,3 +32,4 @@ Classifier: Programming Language :: Pyth
 Classifier: Operating System :: OS Independent
 Classifier: Topic :: Database :: Front-Ends
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
diff -pruN 1.10.6-3/README.adoc 1.23.0-0ubuntu1/README.adoc
--- 1.10.6-3/README.adoc	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/README.adoc	2021-11-13 10:19:30.000000000 +0000
@@ -0,0 +1,3235 @@
+= pg8000
+:toc: preamble
+
+pg8000 is a pure-link:http://www.python.org/[Python]
+http://www.postgresql.org/[PostgreSQL] driver that complies with
+http://www.python.org/dev/peps/pep-0249/[DB-API 2.0]. It is tested on Python
+versions 3.6+, on CPython and PyPy, and PostgreSQL versions 9.6+.
+pg8000's name comes from the belief that it is probably about the 8000th
+PostgreSQL interface for Python. pg8000 is distributed under the BSD 3-clause
+license.
+
+All bug reports, feature requests and contributions are welcome at
+http://github.com/tlocke/pg8000/.
+
+image::https://github.com/tlocke/pg8000/workflows/pg8000/badge.svg[Build Status]
+
+
+== Installation
+
+To install pg8000 using `pip` type:
+
+`pip install pg8000`
+
+
+== Native API Interactive Examples
+
+pg8000 comes with two APIs, the native pg8000 API and the DB-API 2.0 standard
+API. These are the examples for the native API, and the DB-API 2.0 examples
+follow in the next section.
+
+
+=== Basic Example
+
+Import pg8000, connect to the database, create a table, add some rows and then
+query the table:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> # Connect to the database with user name postgres
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> # Create a temporary table
+>>>
+>>> con.run("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
+>>>
+>>> # Populate the table
+>>>
+>>> for title in ("Ender's Game", "The Magus"):
+...     con.run("INSERT INTO book (title) VALUES (:title)", title=title)
+>>>
+>>> # Print all the rows in the table
+>>>
+>>> for row in con.run("SELECT * FROM book"):
+...     print(row)
+[1, "Ender's Game"]
+[2, 'The Magus']
+
+----
+
+
+=== Transactions
+
+Here's how to run groups of SQL statements in a
+https://www.postgresql.org/docs/current/tutorial-transactions.html[transaction]:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("START TRANSACTION")
+>>>
+>>> # Create a temporary table
+>>> con.run("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
+>>>
+>>> for title in ("Ender's Game", "The Magus", "Phineas Finn"):
+...     con.run("INSERT INTO book (title) VALUES (:title)", title=title)
+>>> con.run("COMMIT")
+>>> for row in con.run("SELECT * FROM book"):
+...     print(row)
+[1, "Ender's Game"]
+[2, 'The Magus']
+[3, 'Phineas Finn']
+
+----
+
+rolling back a transaction:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> # Create a temporary table
+>>> con.run("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
+>>>
+>>> for title in ("Ender's Game", "The Magus", "Phineas Finn"):
+...     con.run("INSERT INTO book (title) VALUES (:title)", title=title)
+>>>
+>>> con.run("START TRANSACTION")
+>>> con.run("DELETE FROM book WHERE title = :title", title="Phineas Finn") 
+>>> con.run("ROLLBACK")
+>>> for row in con.run("SELECT * FROM book"):
+...     print(row)
+[1, "Ender's Game"]
+[2, 'The Magus']
+[3, 'Phineas Finn']
+
+----
+
+
+=== Query Using Functions
+
+Another query, using some PostgreSQL functions:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT TO_CHAR(TIMESTAMP '2021-10-10', 'YYYY BC')")
+[['2021 AD']]
+
+----
+
+
+=== Interval Type
+
+A query that returns the PostgreSQL interval type:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> import datetime
+>>>
+>>> ts = datetime.date(1980, 4, 27)
+>>> con.run("SELECT timestamp '2013-12-01 16:06' - :ts", ts=ts)
+[[datetime.timedelta(days=12271, seconds=57960)]]
+
+----
+
+
+=== Point Type
+
+A round-trip with a
+https://www.postgresql.org/docs/current/datatype-geometric.html[PostgreSQL
+point] type:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT CAST(:pt as point)", pt='(2.3,1)')
+[['(2.3,1)']]
+
+----
+
+
+=== Client Encoding
+
+When communicating with the server, pg8000 uses the character set that the
+server asks it to use (the client encoding). By default the client encoding is
+the database's character set (chosen when the database is created), but the
+client encoding can be changed in a number of ways (eg. setting
+CLIENT_ENCODING in postgresql.conf). Another way of changing the client
+encoding is by using an SQL command. For example:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SET CLIENT_ENCODING TO 'UTF8'")
+>>> con.run("SHOW CLIENT_ENCODING")
+[['UTF8']]
+
+----
+
+
+=== JSON
+
+https://www.postgresql.org/docs/current/datatype-json.html[JSON] always comes
+back from the server de-serialized. If the JSON you want to send is a `dict`
+then you can just do:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> val = {'name': 'Apollo 11 Cave', 'zebra': True, 'age': 26.003}
+>>> con.run("SELECT CAST(:apollo as jsonb)", apollo=val)
+[[{'age': 26.003, 'name': 'Apollo 11 Cave', 'zebra': True}]]
+
+----
+
+JSON can always be sent in serialized form to the server:
+
+[source,python]
+----
+>>> import json
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>>
+>>> val = ['Apollo 11 Cave', True, 26.003]
+>>> con.run("SELECT CAST(:apollo as jsonb)", apollo=json.dumps(val))
+[[['Apollo 11 Cave', True, 26.003]]]
+
+----
+
+
+=== Retrieve Column Metadata From Results
+
+Find the column metadata returned from a query:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("create temporary table quark (id serial, name text)")
+>>> for name in ('Up', 'Down'):
+...     con.run("INSERT INTO quark (name) VALUES (:name)", name=name)
+>>> # Now execute the query
+>>>
+>>> con.run("SELECT * FROM quark")
+[[1, 'Up'], [2, 'Down']]
+>>>
+>>> # and retrieve the metadata
+>>>
+>>> con.columns
+[{'table_oid': ..., 'column_attrnum': 1, 'type_oid': 23, 'type_size': 4, 'type_modifier': -1, 'format': 0, 'name': 'id'}, {'table_oid': ..., 'column_attrnum': 2, 'type_oid': 25, 'type_size': -1, 'type_modifier': -1, 'format': 0, 'name': 'name'}]
+>>>
+>>> # Show just the column names
+>>>
+>>> [c['name'] for c in con.columns]
+['id', 'name']
+
+----
+
+
+=== Notices And Notifications
+
+PostgreSQL https://www.postgresql.org/docs/current/static/plpgsql-errors-and-messages.html[notices]
+are stored in a deque called `Connection.notices` and added using the
+`append()` method. Similarly there are `Connection.notifications` for
+https://www.postgresql.org/docs/current/static/sql-notify.html[notifications]
+and `Connection.parameter_statuses` for changes to the server configuration.
+Here's an example:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("LISTEN aliens_landed")
+>>> con.run("NOTIFY aliens_landed")
+>>> # A notification is a tuple containing (backend_pid, channel, payload)
+>>>
+>>> con.notifications[0]
+(..., 'aliens_landed', '')
+
+----
+
+
+=== LIMIT ALL
+
+You might think that the following would work, but in fact it fails:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT 'silo 1' LIMIT :lim", lim='ALL')
+Traceback (most recent call last):
+pg8000.exceptions.DatabaseError: ...
+
+----
+
+Instead the https://www.postgresql.org/docs/current/sql-select.html[docs say]
+that you can send `null` as an alternative to `ALL`, which does work:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT 'silo 1' LIMIT :lim", lim=None)
+[['silo 1']]
+
+----
+
+
+=== IN and NOT IN
+
+You might think that the following would work, but in fact the server doesn't
+like it:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT 'silo 1' WHERE 'a' IN :v", v=('a', 'b'))
+Traceback (most recent call last):
+pg8000.exceptions.DatabaseError: ...
+
+----
+
+instead you can write it using the
+https://www.postgresql.org/docs/current/functions-array.html[`unnest`]
+function:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run(
+...     "SELECT 'silo 1' WHERE 'a' IN (SELECT unnest(CAST(:v as varchar[])))",
+...     v=('a', 'b'))
+[['silo 1']]
+
+----
+
+and you can do the same for `NOT IN`.
+
+
+=== Many SQL Statements Can't Be Parameterized
+
+In PostgreSQL parameters can only be used for
+https://www.postgresql.org/docs/current/xfunc-sql.html#XFUNC-SQL-FUNCTION-ARGUMENTS[data values, not identifiers]. Sometimes this might not work as expected,
+for example the following fails:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("CREATE USER juan WITH PASSWORD :password", password='quail')
+Traceback (most recent call last):
+pg8000.exceptions.DatabaseError: ...
+
+----
+
+It fails because the PostgreSQL server doesn't allow this statement to have
+any parameters. There are many SQL statements that one might think would have
+parameters, but don't.
+
+
+=== COPY from and to a file
+
+The SQL https://www.postgresql.org/docs/current/sql-copy.html[COPY] statement
+can be used to copy from and to a file or file-like object. Here's an example
+using the CSV format:
+
+[source,python]
+----
+
+>>> import pg8000.native
+>>> from io import StringIO
+>>> import csv
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> # Create a CSV file in memory
+>>>
+>>> stream_in = StringIO()
+>>> csv_writer = csv.writer(stream_in)
+>>> csv_writer.writerow([1, "electron"])
+12
+>>> csv_writer.writerow([2, "muon"])
+8
+>>> csv_writer.writerow([3, "tau"])
+7
+>>> stream_in.seek(0)
+0
+>>>
+>>> # Create a table and then copy the CSV into it
+>>>
+>>> con.run("CREATE TEMPORARY TABLE lepton (id SERIAL, name TEXT)")
+>>> con.run("COPY lepton FROM STDIN WITH (FORMAT CSV)", stream=stream_in)
+>>>
+>>> # COPY from a table to a stream
+>>>
+>>> stream_out = StringIO()
+>>> con.run("COPY lepton TO STDOUT WITH (FORMAT CSV)", stream=stream_out)
+>>> stream_out.seek(0)
+0
+>>> for row in csv.reader(stream_out):
+...     print(row)
+['1', 'electron']
+['2', 'muon']
+['3', 'tau']
+
+----
+
+
+=== Execute Multiple SQL Statements
+
+If you want to execute a series of SQL statements (eg. an `.sql` file), you
+can run them as expected:
+
+[source,python]
+----
+
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> statements = "SELECT 5; SELECT 'Erich Fromm';"
+>>>
+>>> con.run(statements)
+[[5], ['Erich Fromm']]
+
+----
+
+The only caveat is that when executing multiple statements you can't have any
+parameters.
+
+
+=== Quoted Identifiers in SQL
+
+Say you had a column called `My Column`. Since it's case sensitive and
+contains a space, you'd have to
+https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS[surround it by double quotes]. But you can't do:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("select 'hello' as "My Column"")
+Traceback (most recent call last):
+SyntaxError: invalid syntax...
+
+----
+
+since Python uses double quotes to delimit string literals, so one solution is
+to use Python's
+https://docs.python.org/3/tutorial/introduction.html#strings[triple quotes]
+to delimit the string instead:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run('''select 'hello' as "My Column"''')
+[['hello']]
+
+----
+
+
+=== Custom adapter from a Python type to a PostgreSQL type
+
+pg8000 has a mapping from Python types to PostgreSQL types for when it needs
+to send SQL parameters to the server. The default mapping that comes with
+pg8000 is designed to work well in most cases, but you might want to add or
+replace the default mapping.
+
+A Python `datetime.timedelta` object is sent to the server as a PostgreSQL
+`interval` type,  which has the `oid` 1186. But let's say we wanted to create
+our own Python class to be sent as an `interval` type. Then we'd have to
+register an adapter:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> class MyInterval(str):
+...     pass
+>>>
+>>> def my_interval_out(my_interval):
+...     return my_interval  # Must return a str
+>>>
+>>> con.register_out_adapter(MyInterval, my_interval_out)
+>>> con.run("SELECT CAST(:interval as interval)", interval=MyInterval("2 hours"))
+[[datetime.timedelta(seconds=7200)]]
+
+----
+
+Note that it still came back as a `datetime.timedelta` object because we only
+changed the mapping from Python to PostgreSQL. See below for an example of how
+to change the mapping from PostgreSQL to Python.
+
+
+=== Custom adapter from a PostgreSQL type to a Python type
+
+pg8000 has a mapping from PostgreSQL types to Python types for when it receives
+SQL results from the server. The default mapping that comes with pg8000 is
+designed to work well in most cases, but you might want to add or replace the
+default mapping.
+
+If pg8000 receives a PostgreSQL `interval` type, which has the `oid` 1186, it
+converts it into a Python `datetime.timedelta` object. But let's say we wanted
+to create our own Python class to be used instead of `datetime.timedelta`. Then
+we'd have to register an adapter:
+
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> class MyInterval(str):
+...     pass
+>>>
+>>> def my_interval_in(my_interval_str):  # The parameter is of type str
+...     return MyInterval(my_interval_str)
+>>>
+>>> con.register_in_adapter(1186, my_interval_in)
+>>> con.run("SELECT \'2 years'")
+[['2 years']]
+
+----
+
+Note that registering the 'in' adapter only affects the mapping from the
+PostgreSQL type to the Python type. See above for an example of how to change
+the mapping from Python to PostgreSQL.
+
+
+=== Could Not Determine Data Type Of Parameter
+
+Sometimes you'll get the 'could not determine data type of parameter' error
+message from the server:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT :v IS NULL", v=None)
+Traceback (most recent call last):
+pg8000.exceptions.DatabaseError: {'S': 'ERROR', 'V': 'ERROR', 'C': '42P18', 'M': 'could not determine data type of parameter $1', 'F': 'postgres.c', 'L': '...', 'R': 'exec_parse_message'}
+
+----
+
+One way of solving it is to put a `cast` in the SQL:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT cast(:v as TIMESTAMP) IS NULL", v=None)
+[[True]]
+
+----
+
+Another way is to override the type that pg8000 sends along with each
+parameter:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> con.run("SELECT :v IS NULL", v=None, types={'v': pg8000.native.TIMESTAMP})
+[[True]]
+
+----
+
+
+=== Prepared Statements
+
+https://www.postgresql.org/docs/current/sql-prepare.html[Prepared statements]
+can be useful in improving performance when you have a statement that's
+executed repeatedly. Here's an example:
+
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection("postgres", password="cpsnow")
+>>>
+>>> # Create the prepared statement
+>>> ps = con.prepare("SELECT cast(:v as varchar)")
+>>>
+>>> # Execute the statement repeatedly
+>>> ps.run(v="speedy")
+[['speedy']]
+>>> ps.run(v="rapid")
+[['rapid']]
+>>> ps.run(v="swift")
+[['swift']]
+>>>
+>>> # Close the prepared statement, releasing resources on the server
+>>> ps.close()
+
+----
+
+
+=== Use Environment Variables As Connection Defaults
+
+You might want to use the current user as the database username for example:
+
+[source,python]
+----
+>>> import pg8000.native
+>>> import getpass
+>>>
+>>> # Connect to the database with current user name
+>>> username = getpass.getuser()
+>>> connection = pg8000.native.Connection(username, password="cpsnow")
+>>>
+>>> connection.run("SELECT 'pilau'")
+[['pilau']]
+
+----
+
+or perhaps you may want to use some of the same
+https://www.postgresql.org/docs/current/libpq-envars.html[environment variables
+that libpq uses]:
+
+[source,python]
+----
+>>> import pg8000.native
+>>> from os import environ
+>>>
+>>> username = environ.get('PGUSER', 'postgres')
+>>> password = environ.get('PGPASSWORD', 'cpsnow')
+>>> host = environ.get('PGHOST', 'localhost')
+>>> port = environ.get('PGPORT', '5432')
+>>> database = environ.get('PGDATABASE')
+>>>
+>>> connection = pg8000.native.Connection(
+...     username, password=password, host=host, port=port, database=database)
+>>>
+>>> connection.run("SELECT 'Mr Cairo'")
+[['Mr Cairo']]
+
+----
+
+It might be asked, why doesn't pg8000 have this behaviour built in? The
+thinking follows the second aphorism of
+https://www.python.org/dev/peps/pep-0020/[The Zen of Python]:
+
+[quote]
+Explicit is better than implicit.
+
+So we've taken the approach of only being able to set connection parameters
+using the `pg8000.native.Connection()` constructor.
+
+
+=== Connect To PostgreSQL Over SSL
+
+To connect to the server using SSL defaults do:
+
+[source,python]
+----
+
+import pg8000.native
+
+
+connection = pg8000.native.Connection(
+    username, password="cpsnow", ssl_context=True)
+connection.run("SELECT 'The game is afoot!'")
+
+----
+
+To connect over SSL with custom settings, set the `ssl_context` parameter to
+an https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
+object:
+
+[source,python]
+----
+
+import pg8000.native
+import ssl
+
+
+ssl_context = ssl.SSLContext()
+ssl_context.verify_mode = ssl.CERT_REQUIRED
+ssl_context.load_verify_locations('root.pem')        
+connection = pg8000.native.Connection(
+    username, password="cpsnow", ssl_context=ssl_context)
+
+----
+
+It may be that your PostgreSQL server is behind an SSL proxy server in which
+case you can set a pg8000-specific attribute
+`ssl.SSLContext.request_ssl = False` which tells pg8000 to connect using an
+SSL socket, but not to request SSL from the PostgreSQL server:
+
+[source,python]
+----
+
+import pg8000.native
+import ssl
+
+
+ssl_context = ssl.SSLContext()
+ssl_context.request_ssl = False
+connection = pg8000.native.Connection(
+    username, password="cpsnow", ssl_context=ssl_context)
+
+----
+
+
+=== Server-Side Cursors
+
+You can use the SQL commands
+https://www.postgresql.org/docs/current/sql-declare.html[`DECLARE`],
+https://www.postgresql.org/docs/current/sql-fetch.html[`FETCH`],
+https://www.postgresql.org/docs/current/sql-move.html[`MOVE`] and
+https://www.postgresql.org/docs/current/sql-close.html[`CLOSE`] to manipulate
+server-side cursors. For example:
+
+[source,python]
+----
+
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection(username, password="cpsnow")
+>>> con.run("START TRANSACTION")
+>>> con.run("DECLARE c SCROLL CURSOR FOR SELECT * FROM generate_series(1, 100)")
+>>> con.run("FETCH FORWARD 5 FROM c")
+[[1], [2], [3], [4], [5]]
+>>> con.run("MOVE FORWARD 50 FROM c")
+>>> con.run("FETCH BACKWARD 10 FROM c")
+[[54], [53], [52], [51], [50], [49], [48], [47], [46], [45]]
+>>> con.run("CLOSE c")
+>>> con.run("ROLLBACK")
+
+----
+
+
+=== BLOBs (Binary Large Objects)
+
+There's a set of
+https://www.postgresql.org/docs/current/lo-funcs.html[SQL functions]
+for manipulating BLOBs. Here's an example:
+
+[source,python]
+----
+>>> import pg8000.native
+>>>
+>>> con = pg8000.native.Connection(username, password="cpsnow")
+>>>
+>>> # Create a BLOB and get its oid
+>>> data = b'hello'
+>>> res = con.run("SELECT lo_from_bytea(0, :data)", data=data)
+>>> oid = res[0][0]
+>>>
+>>> # Create a table and store the oid of the BLOB
+>>> con.run("CREATE TEMPORARY TABLE image (raster oid)")
+>>>
+>>> con.run("INSERT INTO image (raster) VALUES (:oid)", oid=oid)
+>>> # Retrieve the data using the oid
+>>> con.run("SELECT lo_get(:oid)", oid=oid)
+[[b'hello']]
+>>>
+>>> # Add some data to the end of the BLOB
+>>> more_data = b' all'
+>>> offset = len(data)
+>>> con.run(
+...     "SELECT lo_put(:oid, :offset, :data)",
+...     oid=oid, offset=offset, data=more_data)
+[['']]
+>>> con.run("SELECT lo_get(:oid)", oid=oid)
+[[b'hello all']]
+>>>
+>>> # Download a part of the data
+>>> con.run("SELECT lo_get(:oid, 6, 3)", oid=oid)
+[[b'all']]
+
+----
+
+
+== DB-API 2 Interactive Examples
+
+These examples stick to the DB-API 2.0 standard.
+
+
+=== Basic Example
+
+Import pg8000, connect to the database, create a table, add some rows and then
+query the table:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> conn = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cursor = conn.cursor()
+>>> cursor.execute("CREATE TEMPORARY TABLE book (id SERIAL, title TEXT)")
+>>> cursor.execute(
+...     "INSERT INTO book (title) VALUES (%s), (%s) RETURNING id, title",
+...     ("Ender's Game", "Speaker for the Dead"))
+>>> results = cursor.fetchall()
+>>> for row in results:
+...     id, title = row
+...     print("id = %s, title = %s" % (id, title))
+id = 1, title = Ender's Game
+id = 2, title = Speaker for the Dead
+>>> conn.commit()
+
+----
+
+
+=== Query Using Functions
+
+Another query, using some PostgreSQL functions:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cursor = con.cursor()
+>>>
+>>> cursor.execute("SELECT TO_CHAR(TIMESTAMP '2021-10-10', 'YYYY BC')")
+>>> cursor.fetchone()
+['2021 AD']
+
+----
+
+
+=== Interval Type
+
+A query that returns the PostgreSQL interval type:
+
+[source,python]
+----
+>>> import datetime
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cursor = con.cursor()
+>>>
+>>> cursor.execute("SELECT timestamp '2013-12-01 16:06' - %s",
+... (datetime.date(1980, 4, 27),))
+>>> cursor.fetchone()
+[datetime.timedelta(days=12271, seconds=57960)]
+
+----
+
+
+=== Point Type
+
+A round-trip with a
+https://www.postgresql.org/docs/current/datatype-geometric.html[PostgreSQL
+point] type:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cursor = con.cursor()
+>>>
+>>> cursor.execute("SELECT cast(%s as point)", ('(2.3,1)',))
+>>> cursor.fetchone()
+['(2.3,1)']
+
+----
+
+
+=== Numeric Parameter Style
+
+pg8000 supports all the DB-API parameter styles. Here's an example of using
+the 'numeric' parameter style:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> pg8000.dbapi.paramstyle = "numeric"
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cursor = con.cursor()
+>>>
+>>> cursor.execute("SELECT array_prepend(:1, CAST(:2 AS int[]))", (500, [1, 2, 3, 4],))
+>>> cursor.fetchone()
+[[500, 1, 2, 3, 4]]
+>>> pg8000.dbapi.paramstyle = "format"
+>>> con.rollback()
+
+----
+
+
+=== Autocommit
+
+Following the DB-API specification, autocommit is off by default. It can be
+turned on by using the autocommit property of the connection.
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> con.autocommit = True
+>>>
+>>> cur = con.cursor()
+>>> cur.execute("vacuum")
+>>> con.autocommit = False
+>>> cur.close()
+
+----
+
+
+=== Client Encoding
+
+When communicating with the server, pg8000 uses the character set that the
+server asks it to use (the client encoding). By default the client encoding is
+the database's character set (chosen when the database is created), but the
+client encoding can be changed in a number of ways (eg. setting
+CLIENT_ENCODING in postgresql.conf). Another way of changing the client
+encoding is by using an SQL command. For example:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cur = con.cursor()
+>>> cur.execute("SET CLIENT_ENCODING TO 'UTF8'")
+>>> cur.execute("SHOW CLIENT_ENCODING")
+>>> cur.fetchone()
+['UTF8']
+>>> cur.close()
+
+----
+
+
+=== JSON
+
+JSON is sent to the server serialized, and returned de-serialized. Here's an
+example:
+
+[source,python]
+----
+>>> import json
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cur = con.cursor()
+>>> val = ['Apollo 11 Cave', True, 26.003]
+>>> cur.execute("SELECT cast(%s as json)", (json.dumps(val),))
+>>> cur.fetchone()
+[['Apollo 11 Cave', True, 26.003]]
+>>> cur.close()
+
+----
+
+
+=== Retrieve Column Names From Results
+
+Use the columns names retrieved from a query:
+
+[source,python]
+----
+>>> import pg8000
+>>> conn = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> c = conn.cursor()
+>>> c.execute("create temporary table quark (id serial, name text)")
+>>> c.executemany("INSERT INTO quark (name) VALUES (%s)", (("Up",), ("Down",)))
+>>> #
+>>> # Now retrieve the results
+>>> #
+>>> c.execute("select * from quark")
+>>> rows = c.fetchall()
+>>> keys = [k[0] for k in c.description]
+>>> results = [dict(zip(keys, row)) for row in rows]
+>>> assert results == [{'id': 1, 'name': 'Up'}, {'id': 2, 'name': 'Down'}]
+
+----
+
+
+=== Notices
+
+PostgreSQL https://www.postgresql.org/docs/current/static/plpgsql-errors-and-messages.html[notices]
+are stored in a deque called `Connection.notices` and added using the
+`append()` method. Similarly there are `Connection.notifications` for
+https://www.postgresql.org/docs/current/static/sql-notify.html[notifications]
+and `Connection.parameter_statuses` for changes to the server configuration.
+Here's an example:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cur = con.cursor()
+>>> cur.execute("LISTEN aliens_landed")
+>>> cur.execute("NOTIFY aliens_landed")
+>>> con.commit()
+>>> con.notifications[0][1]
+'aliens_landed'
+
+----
+
+
+=== COPY from and to a file
+
+The SQL https://www.postgresql.org/docs/current/sql-copy.html[COPY] statement
+can be used to copy from and to a file or file-like object:
+
+[source,python]
+----
+>>> from io import StringIO
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cur = con.cursor()
+>>> #
+>>> # COPY from a stream to a table
+>>> #
+>>> stream_in = StringIO('1\telectron\n2\tmuon\n3\ttau\n')
+>>> cur = con.cursor()
+>>> cur.execute("create temporary table lepton (id serial, name text)")
+>>> cur.execute("COPY lepton FROM stdin", stream=stream_in)
+>>> #
+>>> # Now COPY from a table to a stream
+>>> #
+>>> stream_out = StringIO()
+>>> cur.execute("copy lepton to stdout", stream=stream_out)
+>>> stream_out.getvalue()
+'1\telectron\n2\tmuon\n3\ttau\n'
+
+----
+
+
+=== Server-Side Cursors
+
+You can use the SQL commands
+https://www.postgresql.org/docs/current/sql-declare.html[`DECLARE`],
+https://www.postgresql.org/docs/current/sql-fetch.html[`FETCH`],
+https://www.postgresql.org/docs/current/sql-move.html[`MOVE`] and
+https://www.postgresql.org/docs/current/sql-close.html[`CLOSE`] to manipulate
+server-side cursors. For example:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cur = con.cursor()
+>>> cur.execute("START TRANSACTION")
+>>> cur.execute(
+...    "DECLARE c SCROLL CURSOR FOR SELECT * FROM generate_series(1, 100)")
+>>> cur.execute("FETCH FORWARD 5 FROM c")
+>>> cur.fetchall()
+([1], [2], [3], [4], [5])
+>>> cur.execute("MOVE FORWARD 50 FROM c")
+>>> cur.execute("FETCH BACKWARD 10 FROM c")
+>>> cur.fetchall()
+([54], [53], [52], [51], [50], [49], [48], [47], [46], [45])
+>>> cur.execute("CLOSE c")
+>>> cur.execute("ROLLBACK")
+
+----
+
+
+=== BLOBs (Binary Large Objects)
+
+There's a set of
+https://www.postgresql.org/docs/current/lo-funcs.html[SQL functions]
+for manipulating BLOBs. Here's an example:
+
+[source,python]
+----
+>>> import pg8000.dbapi
+>>>
+>>> con = pg8000.dbapi.connect(user="postgres", password="cpsnow")
+>>> cur = con.cursor()
+>>>
+>>> # Create a BLOB and get its oid
+>>> data = b'hello'
+>>> cur = con.cursor()
+>>> cur.execute("SELECT lo_from_bytea(0, %s)", [data])
+>>> oid = cur.fetchone()[0]
+>>>
+>>> # Create a table and store the oid of the BLOB
+>>> cur.execute("CREATE TEMPORARY TABLE image (raster oid)")
+>>> cur.execute("INSERT INTO image (raster) VALUES (%s)", [oid])
+>>>
+>>> # Retrieve the data using the oid
+>>> cur.execute("SELECT lo_get(%s)", [oid])
+>>> cur.fetchall()
+([b'hello'],)
+>>>
+>>> # Add some data to the end of the BLOB
+>>> more_data = b' all'
+>>> offset = len(data)
+>>> cur.execute("SELECT lo_put(%s, %s, %s)", [oid, offset, more_data])
+>>> cur.execute("SELECT lo_get(%s)", [oid])
+>>> cur.fetchall()
+([b'hello all'],)
+>>>
+>>> # Download a part of the data
+>>> cur.execute("SELECT lo_get(%s, 6, 3)", [oid])
+>>> cur.fetchall()
+([b'all'],)
+
+----
+
+
+
+== Type Mapping
+
+The following table shows the default mapping between Python types and
+PostgreSQL types, and vice versa.
+
+If pg8000 doesn't recognize a type that it receives from PostgreSQL, it will
+return it as a `str` type. This is how pg8000 handles PostgreSQL `enum` and
+XML types. It's possible to change the default mapping using adapters (see the
+examples).
+
+.Python to PostgreSQL Type Mapping
+|===
+| Python Type | PostgreSQL Type | Notes
+
+| bool
+| bool
+|
+
+| int
+| int4
+|
+
+| str
+| text
+|
+
+| float
+| float8
+|
+
+| decimal.Decimal
+| numeric
+|
+
+| bytes
+| bytea
+|
+
+| datetime.datetime (without tzinfo)
+| timestamp without timezone
+| See note below.
+
+| datetime.datetime (with tzinfo)
+| timestamp with timezone
+| See note below.
+
+| datetime.date
+| date
+| See note below.
+
+| datetime.time
+| time without time zone
+|
+
+| datetime.timedelta
+| interval
+|
+
+| None
+| NULL
+|
+
+| uuid.UUID
+| uuid
+|
+
+| ipaddress.IPv4Address
+| inet
+|
+
+| ipaddress.IPv6Address
+| inet
+|
+
+| ipaddress.IPv4Network
+| inet
+|
+
+| ipaddress.IPv6Network
+| inet
+|
+
+| int
+| xid
+|
+
+| list of int
+| INT4[]
+|
+
+| list of float
+| FLOAT8[]
+|
+
+| list of bool
+| BOOL[]
+|
+
+| list of str
+| TEXT[]
+|
+
+| int
+| int2vector
+| Only from PostgreSQL to Python
+
+| JSON
+| json, jsonb
+| JSON parameters are sent to the server as serialized strings. Results are
+  returned as de-serialized JSON.
+|===
+
+
+[[_theory_of_operation]]
+== Theory Of Operation
+
+{empty} +
+
+[quote, Jochen Liedtke, Liedtke's minimality principle]
+____
+A concept is tolerated inside the microkernel only if moving it outside the
+kernel, i.e., permitting competing implementations, would prevent the
+implementation of the system's required functionality.
+____
+
+
+pg8000 is designed to be used with one thread per connection.
+
+Pg8000 communicates with the database using the
+https://www.postgresql.org/docs/current/protocol.html[PostgreSQL Frontend/Backend
+Protocol] (FEBE). If a query has no parameters, pg8000 uses the
+'simple query protocol'. If a query does have parameters, pg8000 uses the
+'extended query protocol' with unnamed prepared statements. The steps for a query with
+parameters are:
+
+. Query comes in.
+. Send a PARSE message to the server to create an unnamed prepared statement.
+. Send a BIND message to run against the unnamed prepared statement, resulting in an
+  unnamed portal on the server.
+. Send an EXECUTE message to read all the results from the portal.
+
+It's also possible to use named prepared statements, in which case the prepared
+statement persists on the server and is represented in pg8000 by a
+PreparedStatement object. This means that the PARSE step gets executed once up
+front, and then only the BIND and EXECUTE steps are repeated subsequently.
+
+There are a lot of PostgreSQL data types, but few primitive data types in Python. By
+default, pg8000 doesn't send PostgreSQL data type information in the PARSE step, in
+which case PostgreSQL assumes the types implied by the SQL statement. In some cases
+PostgreSQL can't work out a parameter type and so an
+https://www.postgresql.org/docs/current/static/sql-expressions.html#SQL-SYNTAX-TYPE-CASTS[explicit cast] can be used in the SQL.
+
+In the FEBE protocol, each query parameter can be sent to the server either as binary
+or text according to the format code. In pg8000 the parameters are always sent as text.
+
+* PostgreSQL has +/-infinity values for dates and timestamps, but Python does not.
+  Pg8000 handles this by returning +/-infinity strings in results, and in parameters
+  the strings +/- infinity can be used.
+
+* PostgreSQL dates/timestamps can have values outside the range of Python datetimes.
+  These are handled using the underlying PostgreSQL storage method. I don't know of any
+  users of pg8000 that use this feature, so get in touch if it affects you.
+
+* Occasionally, the network connection between pg8000 and the server may go
+  down. If pg8000 encounters a network problem it'll raise an `InterfaceError`
+  with an error message starting with `network error` and with the original
+  exception set as the
+  https://docs.python.org/3/reference/simple_stmts.html#the-raise-statement[cause].
+
+
+== Native API Docs
+
+=== pg8000.native.Connection(user, host='localhost', database=None, port=5432, password=None, source_address=None, unix_sock=None, ssl_context=None, timeout=None, tcp_keepalive=True, application_name=None, replication=None)
+
+Creates a connection to a PostgreSQL database.
+
+user::
+  The username to connect to the PostgreSQL server with. If your server
+  character encoding is not `ascii` or `utf8`, then you need to provide
+  `user` as bytes, eg. `'my_name'.encode('EUC-JP')`.
+
+host::
+  The hostname of the PostgreSQL server to connect with. Providing this
+  parameter is necessary for TCP/IP connections. One of either `host` or
+  `unix_sock` must be provided. The default is `localhost`.
+
+database::
+  The name of the database instance to connect with. If `None` then the
+  PostgreSQL server will assume the database name is the same as the username.
+  If your server character encoding is not `ascii` or `utf8`, then you need to
+  provide `database` as bytes, eg. `'my_db'.encode('EUC-JP')`.
+
+port::
+  The TCP/IP port of the PostgreSQL server instance.  This parameter defaults
+  to `5432`, the registered common port of PostgreSQL TCP/IP servers.
+
+password::
+  The user password to connect to the server with. This parameter is optional;
+  if omitted and the database server requests password-based authentication,
+  the connection will fail to open. If this parameter is provided but not
+  requested by the server, no error will occur. +
+   +
+  If your server character encoding is not `ascii` or `utf8`, then
+  you need to provide `password` as bytes, eg.
+  `'my_password'.encode('EUC-JP')`.
+
+
+source_address::
+  The source IP address which initiates the connection to the PostgreSQL server.
+  The default is `None` which means that the operating system will choose the
+  source address.
+
+unix_sock::
+  The path to the UNIX socket to access the database through, for example,
+  `'/tmp/.s.PGSQL.5432'`. One of either `host` or `unix_sock` must be provided.
+
+ssl_context::
+  This governs SSL encryption for TCP/IP sockets. It can have three values:
+    * `None`, meaning no SSL (the default)
+    * `True`, means use SSL with an
+       https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
+       created using
+      https://docs.python.org/3/library/ssl.html#ssl.create_default_context[`ssl.create_default_context()`]
+    *  An instance of
+       https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
+       which will be used to create the SSL connection. +
+       +
+       If your PostgreSQL server is behind an SSL proxy, you can set the
+       pg8000-specific attribute `ssl.SSLContext.request_ssl = False`, which
+       tells pg8000 to use an SSL socket, but not to request SSL from the
+       PostgreSQL server. Note that this means you can't use SCRAM
+       authentication with channel binding.
+
+timeout::
+  This is the time in seconds before the connection to the server will time
+  out. The default is `None` which means no timeout.
+
+tcp_keepalive::
+  If `True` then use
+  https://en.wikipedia.org/wiki/Keepalive#TCP_keepalive[TCP keepalive]. The
+  default is `True`.
+
+application_name::
+  Sets the https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME[application_name]. If your server character encoding is not
+  `ascii` or `utf8`, then you need to provide values as bytes, eg.
+  `'my_application_name'.encode('EUC-JP')`. The default is `None` which means
+  that the server will set the application name.
+
+replication::
+  Used to run in https://www.postgresql.org/docs/12/protocol-replication.html[streaming replication mode].
+  If your server character encoding is not `ascii` or `utf8`, then you need to
+  provide values as bytes, eg. `'database'.encode('EUC-JP')`.
+
+
+=== pg8000.native.Error
+
+Generic exception that is the base exception of the other error exceptions.
+
+
+=== pg8000.native.InterfaceError
+
+For errors that originate within pg8000.
+
+
+=== pg8000.native.DatabaseError
+
+For errors that originate from the server.
+
+=== pg8000.native.Connection.notifications
+
+A deque of server-side
+https://www.postgresql.org/docs/current/sql-notify.html[notifications] received
+by this database connection (via the LISTEN / NOTIFY PostgreSQL commands). Each
+list item is a three-element tuple containing the PostgreSQL backend PID that
+issued the notify, the channel and the payload.
+
+
+=== pg8000.native.Connection.notices
+
+A deque of server-side notices received by this database connection.
+
+
+=== pg8000.native.Connection.parameter_statuses
+
+A deque of server-side parameter statuses received by this database connection.
+
+
+=== pg8000.native.Connection.run(sql, stream=None, types=None, **kwargs)
+
+Executes an sql statement, and returns the results as a `list`. For example:
+
+`con.run("SELECT * FROM cities where population > :pop", pop=10000)`
+
+sql::
+  The SQL statement to execute. Parameter placeholders appear as a `:` followed
+  by the parameter name.
+
+stream::
+  For use with the PostgreSQL
+http://www.postgresql.org/docs/current/static/sql-copy.html[COPY] command. For
+a `COPY FROM` the parameter must be a readable file-like object, and for
+`COPY TO` it must be writable.
+
+types::
+  A dictionary of oids. A key corresponds to a parameter. 
+
+kwargs::
+  The parameters of the SQL statement.
+
+
+=== pg8000.native.Connection.row_count
+
+This read-only attribute contains the number of rows that the last `run()`
+method produced (for query statements like `SELECT`) or affected (for
+modification statements like `UPDATE`).
+
+The value is -1 if:
+
+* No `run()` method has been performed yet.
+* There was no rowcount associated with the last `run()`.
+* Using a `SELECT` query statement on a PostgreSQL server older than version
+  9.
+* Using a `COPY` query statement on PostgreSQL server version 8.1 or older.
+
+
+=== pg8000.native.Connection.columns
+
+A list of column metadata. Each item in the list is a dictionary with the
+following keys:
+
+* name
+* table_oid
+* column_attrnum
+* type_oid
+* type_size
+* type_modifier
+* format
+
+
+=== pg8000.native.Connection.close()
+
+Closes the database connection.
+
+
+=== pg8000.native.Connection.register_out_adapter(typ, oid, out_func)
+
+Register a type adapter for types going out from pg8000 to the server.
+
+typ::
+  The Python class that the adapter is for.
+
+oid::
+  The PostgreSQL type identifier found in the
+  https://www.postgresql.org/docs/current/catalog-pg-type.html[pg_type system
+  catalog].
+
+out_func::
+  A function that takes the Python object and returns its string representation
+  in the format that the server requires.
+
+
+=== pg8000.native.Connection.register_in_adapter(oid, in_func)
+
+Register a type adapter for types coming in from the server to pg8000.
+
+oid::
+  The PostgreSQL type identifier found in the
+  https://www.postgresql.org/docs/current/catalog-pg-type.html[pg_type system
+  catalog].
+
+in_func::
+  A function that takes the PostgreSQL string representation and returns
+  a corresponding Python object.
+
+
+=== pg8000.native.Connection.prepare(sql)
+
+Returns a PreparedStatement object which represents a
+https://www.postgresql.org/docs/current/sql-prepare.html[prepared statement] on
+the server. It can subsequently be repeatedly executed as shown in the
+<<_prepared_statements, example>>.
+
+sql::
+  The SQL statement to prepare. Parameter placeholders appear as a `:` followed
+  by the parameter name.
+
+
+=== pg8000.native.PreparedStatement
+
+A prepared statement object is returned by the
+`pg8000.native.Connection.prepare()` method of a connection. It has the
+following methods:
+
+
+=== pg8000.native.PreparedStatement.run(**kwargs)
+
+Executes the prepared statement, and returns the results as a `tuple`.
+
+kwargs::
+  The parameters of the prepared statement.
+
+
+=== pg8000.native.PreparedStatement.close()
+
+Closes the prepared statement, releasing the prepared statement held on the
+server.
+
+
+== DB-API 2 Docs
+
+
+=== Properties
+
+
+==== pg8000.dbapi.apilevel
+
+The DBAPI level supported, currently "2.0".
+
+This property is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.threadsafety
+
+Integer constant stating the level of thread safety the DBAPI interface
+supports. For pg8000, the threadsafety value is 1, meaning that threads may
+share the module but not connections.
+
+This property is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+==== pg8000.dbapi.paramstyle
+
+String property stating the type of parameter marker formatting expected by
+the interface.  This value defaults to "format", in which parameters are
+marked in this format: "WHERE name=%s".
+
+This property is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+As an extension to the DBAPI specification, this value is not constant; it
+can be changed to any of the following values:
+
+qmark::
+  Question mark style, eg. `WHERE name=?`
+
+numeric::
+  Numeric positional style, eg. `WHERE name=:1`
+
+named::
+  Named style, eg. `WHERE name=:paramname`
+
+format::
+  printf format codes, eg. `WHERE name=%s`
+
+pyformat::
+  Python format codes, eg. `WHERE name=%(paramname)s`
+
+
+==== pg8000.dbapi.STRING
+
+String type oid.
+
+==== pg8000.dbapi.BINARY
+
+Binary type oid.
+
+==== pg8000.dbapi.NUMBER
+
+Numeric type oid.
+
+
+==== pg8000.dbapi.DATETIME
+
+Timestamp type oid.
+
+
+==== pg8000.dbapi.ROWID
+
+ROWID type oid.
+
+
+=== Functions
+
+==== pg8000.dbapi.connect(user, host='localhost', database=None, port=5432, password=None, source_address=None, unix_sock=None, ssl_context=None, timeout=None, tcp_keepalive=True, application_name=None, replication=None)
+
+Creates a connection to a PostgreSQL database.
+
+This property is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+user::
+  The username to connect to the PostgreSQL server with. If your server
+  character encoding is not `ascii` or `utf8`, then you need to provide
+  `user` as bytes, eg. `'my_name'.encode('EUC-JP')`.
+
+host::
+  The hostname of the PostgreSQL server to connect with. Providing this
+  parameter is necessary for TCP/IP connections. One of either `host` or
+  `unix_sock` must be provided. The default is `localhost`.
+
+database::
+  The name of the database instance to connect with. If `None` then the
+  PostgreSQL server will assume the database name is the same as the username.
+  If your server character encoding is not `ascii` or `utf8`, then you need to
+  provide `database` as bytes, eg. `'my_db'.encode('EUC-JP')`.
+
+port::
+  The TCP/IP port of the PostgreSQL server instance.  This parameter defaults
+  to `5432`, the registered common port of PostgreSQL TCP/IP servers.
+
+password::
+  The user password to connect to the server with. This parameter is optional;
+  if omitted and the database server requests password-based authentication,
+  the connection will fail to open. If this parameter is provided but not
+  requested by the server, no error will occur. +
+   +
+  If your server character encoding is not `ascii` or `utf8`, then
+  you need to provide `password` as bytes, eg.
+  `'my_password'.encode('EUC-JP')`.
+
+
+source_address::
+  The source IP address which initiates the connection to the PostgreSQL server.
+  The default is `None` which means that the operating system will choose the
+  source address.
+
+unix_sock::
+  The path to the UNIX socket to access the database through, for example,
+  `'/tmp/.s.PGSQL.5432'`. One of either `host` or `unix_sock` must be provided.
+
+ssl_context::
+  This governs SSL encryption for TCP/IP sockets. It can have three values:
+    * `None`, meaning no SSL (the default)
+    * `True`, means use SSL with an
+       https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
+       created using
+      https://docs.python.org/3/library/ssl.html#ssl.create_default_context[`ssl.create_default_context()`]
+    *  An instance of
+       https://docs.python.org/3/library/ssl.html#ssl.SSLContext[`ssl.SSLContext`]
+       which will be used to create the SSL connection. +
+       +
+       If your PostgreSQL server is behind an SSL proxy, you can set the
+       pg8000-specific attribute `ssl.SSLContext.request_ssl = False`, which
+       tells pg8000 to use an SSL socket, but not to request SSL from the
+       PostgreSQL server. Note that this means you can't use SCRAM
+       authentication with channel binding.
+
+timeout::
+  This is the time in seconds before the connection to the server will time
+  out. The default is `None` which means no timeout.
+
+tcp_keepalive::
+  If `True` then use
+  https://en.wikipedia.org/wiki/Keepalive#TCP_keepalive[TCP keepalive]. The
+  default is `True`.
+
+application_name::
+  Sets the https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME[application_name]. If your server character encoding is not
+  `ascii` or `utf8`, then you need to provide values as bytes, eg.
+  `'my_application_name'.encode('EUC-JP')`. The default is `None` which means
+  that the server will set the application name.
+
+replication::
+  Used to run in https://www.postgresql.org/docs/12/protocol-replication.html[streaming replication mode].
+  If your server character encoding is not `ascii` or `utf8`, then you need to
+  provide values as bytes, eg. `'database'.encode('EUC-JP')`.
+
+
+==== pg8000.dbapi.Date(year, month, day)
+
+Construct an object holding a date value.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `datetime.date`
+
+
+==== pg8000.dbapi.Time(hour, minute, second)
+
+Construct an object holding a time value.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `datetime.time`
+
+
+==== pg8000.dbapi.Timestamp(year, month, day, hour, minute, second)
+
+Construct an object holding a timestamp value.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `datetime.datetime`
+
+
+==== pg8000.dbapi.DateFromTicks(ticks)
+
+Construct an object holding a date value from the given ticks value (number of
+seconds since the epoch).
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `datetime.date`
+
+
+==== pg8000.dbapi.TimeFromTicks(ticks)
+
+Construct an object holding a time value from the given ticks value (number of
+seconds since the epoch).
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `datetime.time`
+
+
+==== pg8000.dbapi.TimestampFromTicks(ticks)
+
+Construct an object holding a timestamp value from the given ticks value
+(number of seconds since the epoch).
+
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `datetime.datetime`
+
+
+==== pg8000.dbapi.Binary(value)
+
+Construct an object holding binary data.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: `bytes`.
+
+
+=== Generic Exceptions
+
+Pg8000 uses the standard DBAPI 2.0 exception tree as "generic" exceptions.
+Generally, more specific exception types are raised; these specific exception
+types are derived from the generic exceptions.
+
+==== pg8000.dbapi.Warning
+
+Generic exception raised for important database warnings like data truncations.
+This exception is not currently used by pg8000.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+==== pg8000.dbapi.Error
+
+Generic exception that is the base exception of all other error exceptions.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.InterfaceError
+
+Generic exception raised for errors that are related to the database interface
+rather than the database itself. For example, if the interface attempts to use
+an SSL connection but the server refuses, an InterfaceError will be raised.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.DatabaseError
+
+Generic exception raised for errors that are related to the database. This
+exception is currently never raised by pg8000.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.DataError
+
+Generic exception raised for errors that are due to problems with the processed
+data. This exception is not currently raised by pg8000.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.OperationalError
+
+Generic exception raised for errors that are related to the database's
+operation and not necessarily under the control of the programmer. This
+exception is currently never raised by pg8000.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.IntegrityError
+
+Generic exception raised when the relational integrity of the database is
+affected. This exception is not currently raised by pg8000.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.InternalError
+
+Generic exception raised when the database encounters an internal error. This
+is currently only raised when unexpected state occurs in the pg8000 interface
+itself, and is typically the result of an interface bug.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.ProgrammingError
+
+Generic exception raised for programming errors.  For example, this exception
+is raised if more parameter fields are in a query string than there are
+available parameters.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+==== pg8000.dbapi.NotSupportedError
+
+Generic exception raised in case a method or database API was used which is not
+supported by the database.
+
+This exception is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+=== Classes
+
+
+==== pg8000.dbapi.Connection
+
+A connection object is returned by the `pg8000.dbapi.connect()` function. It
+represents a single physical connection to a PostgreSQL database.
+
+===== pg8000.dbapi.Connection.notifications
+
+A deque of server-side
+https://www.postgresql.org/docs/current/sql-notify.html[notifications] received
+by this database connection (via the LISTEN / NOTIFY PostgreSQL commands). Each
+list item is a three-element tuple containing the PostgreSQL backend PID that
+issued the notify, the channel and the payload.
+
+This attribute is not part of the DBAPI standard; it is a pg8000 extension.
+
+
+===== pg8000.dbapi.Connection.notices
+
+A deque of server-side notices received by this database connection.
+
+This attribute is not part of the DBAPI standard; it is a pg8000 extension.
+
+
+===== pg8000.dbapi.Connection.parameter_statuses
+
+A deque of server-side parameter statuses received by this database connection.
+
+This attribute is not part of the DBAPI standard; it is a pg8000 extension.
+
+
+===== pg8000.dbapi.Connection.autocommit
+
+Following the DB-API specification, autocommit is off by default. It can be
+turned on by setting this boolean pg8000-specific autocommit property to True.
+
+New in version 1.9.
+
+
+===== pg8000.dbapi.Connection.close()
+
+Closes the database connection.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.cursor()
+
+Creates a `pg8000.dbapi.Cursor` object bound to this connection.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.rollback()
+
+Rolls back the current database transaction.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.tpc_begin(xid)
+
+Begins a TPC transaction with the given transaction ID xid. This method should
+be called outside of a transaction (i.e. nothing may have executed since the
+last `commit()` or `rollback()`). Furthermore, it is an error to call
+`commit()` or `rollback()` within the TPC transaction. A `ProgrammingError` is
+raised, if the application calls `commit()` or `rollback()` during an active
+TPC transaction.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.tpc_commit(xid=None)
+
+When called with no arguments, `tpc_commit()` commits a TPC transaction
+previously prepared with `tpc_prepare()`. If `tpc_commit()` is called prior to
+`tpc_prepare()`, a single phase commit is performed. A transaction manager may
+choose to do this if only a single resource is participating in the global
+transaction.
+
+When called with a transaction ID `xid`, the database commits the given
+transaction. If an invalid transaction ID is provided, a
+ProgrammingError will be raised. This form should be called outside of
+a transaction, and is intended for use in recovery.
+
+On return, the TPC transaction is ended.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.tpc_prepare()
+
+Performs the first phase of a transaction started with .tpc_begin(). A
+ProgrammingError is raised if this method is called outside of a TPC
+transaction.
+
+After calling `tpc_prepare()`, no statements can be executed until
+`tpc_commit()` or `tpc_rollback()` have been called.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.tpc_recover()
+
+Returns a list of pending transaction IDs suitable for use with
+`tpc_commit(xid)` or `tpc_rollback(xid)`
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Connection.tpc_rollback(xid=None)
+
+When called with no arguments, `tpc_rollback()` rolls back a TPC transaction.
+It may be called before or after `tpc_prepare()`.
+
+When called with a transaction ID xid, it rolls back the given transaction. If
+an invalid transaction ID is provided, a `ProgrammingError` is raised. This
+form should be called outside of a transaction, and is intended for use in
+recovery.
+
+On return, the TPC transaction is ended.
+
+This function is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+===== pg8000.dbapi.Connection.xid(format_id, global_transaction_id, branch_qualifier)
+
+Create a Transaction ID. Only `global_transaction_id` is used in PostgreSQL;
+`format_id` and `branch_qualifier` are not used. `global_transaction_id` may be
+any string identifier supported by PostgreSQL. Returns a tuple
+`(format_id, global_transaction_id, branch_qualifier)`.
+
+
+==== pg8000.dbapi.Cursor
+
+A cursor object is returned by the `pg8000.dbapi.Connection.cursor()` method
+of a connection. It has the following attributes and methods:
+
+===== pg8000.dbapi.Cursor.arraysize
+
+This read/write attribute specifies the number of rows to fetch at a time with
+`pg8000.dbapi.Cursor.fetchmany()`.  It defaults to 1.
+
+
+===== pg8000.dbapi.Cursor.connection
+
+This read-only attribute contains a reference to the connection object
+(an instance of `pg8000.dbapi.Connection`) on which the cursor was created.
+
+This attribute is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Cursor.rowcount
+
+This read-only attribute contains the number of rows that the last
+`execute()` or `executemany()` method produced (for query statements like
+`SELECT`) or affected (for modification statements like `UPDATE`).
+
+The value is -1 if:
+
+* No `execute()` or `executemany()` method has been performed yet on the
+  cursor.
+* There was no rowcount associated with the last `execute()`.
+* At least one of the statements executed as part of an `executemany()` had no
+  row count associated with it.
+* Using a `SELECT` query statement on a PostgreSQL server older than version
+  9.
+* Using a `COPY` query statement on PostgreSQL server version 8.1 or older.
+
+This attribute is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Cursor.description
+
+This read-only attribute is a sequence of 7-item sequences. Each value contains
+information describing one result column. The 7 items returned for each column
+are (name, type_code, display_size, internal_size, precision, scale, null_ok).
+Only the first two values are provided by the current implementation.
+
+This attribute is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Cursor.close()
+
+Closes the cursor.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Cursor.execute(operation, args=None, stream=None)
+
+Executes a database operation. Parameters may be provided as a sequence, or as
+a mapping, depending upon the value of `pg8000.paramstyle`. Returns the cursor,
+which may be iterated over.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+operation::
+  The SQL statement to execute.
+
+args::
+  If `pg8000.dbapi.paramstyle` is `qmark`, `numeric`, or `format`, this
+argument should be an array of parameters to bind into the statement. If
+`pg8000.dbapi.paramstyle` is `named`, the argument should be a `dict` mapping of
+parameters. If `pg8000.dbapi.paramstyle` is `pyformat`, the argument value may be
+either an array or a mapping.
+
+stream::
+  This is a pg8000 extension for use with the PostgreSQL
+http://www.postgresql.org/docs/current/static/sql-copy.html[COPY] command. For
+a `COPY FROM` the parameter must be a readable file-like object, and for
+`COPY TO` it must be writable.
+
+New in version 1.9.11.
+
+
+===== pg8000.dbapi.Cursor.executemany(operation, param_sets)
+
+Prepare a database operation, and then execute it against all parameter
+sequences or mappings provided.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+operation::
+  The SQL statement to execute.
+parameter_sets::
+  A sequence of parameters to execute the statement with. The values in the
+  sequence should be sequences or mappings of parameters, the same as the args
+  argument of the `pg8000.dbapi.Cursor.execute()` method.
+
+
+===== pg8000.dbapi.Cursor.callproc(procname, parameters=None)
+
+Call a stored database procedure with the given name and optional parameters.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+procname::
+  The name of the procedure to call.
+
+parameters::
+  A list of parameters.
+
+
+===== pg8000.dbapi.Cursor.fetchall()
+
+Fetches all remaining rows of a query result.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: A sequence, each entry of which is a sequence of field values making
+up a row.
+
+
+===== pg8000.dbapi.Cursor.fetchmany(size=None)
+
+Fetches the next set of rows of a query result.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+size::
+  The number of rows to fetch when called.  If not provided, the
+  `pg8000.dbapi.Cursor.arraysize` attribute value is used instead.
+
+Returns: A sequence, each entry of which is a sequence of field values making
+up a row.  If no more rows are available, an empty sequence will be returned.
+
+
+===== pg8000.dbapi.Cursor.fetchone()
+
+Fetch the next row of a query result set.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+Returns: A row as a sequence of field values, or `None` if no more rows are
+available.
+
+
+===== pg8000.dbapi.Cursor.setinputsizes(*sizes)
+
+Used to set the parameter types of the next query. This is useful if it's
+difficult for pg8000 to work out the types from the parameters themselves
+(eg. for parameters of type None).
+
+sizes::
+  Positional parameters that are either the Python type of the parameter to be
+  sent, or the PostgreSQL oid. Common oids are available as constants such as
+  pg8000.STRING, pg8000.INTEGER, pg8000.TIME etc.
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification].
+
+
+===== pg8000.dbapi.Cursor.setoutputsize(size, column=None)
+
+This method is part of the
+http://www.python.org/dev/peps/pep-0249/[DBAPI 2.0 specification], however, it
+is not implemented by pg8000.
+
+
+==== pg8000.dbapi.Interval
+
+An Interval represents a measurement of time.  In PostgreSQL, an interval is
+defined in the measure of months, days, and microseconds; as such, the pg8000
+interval type represents the same information.
+
+Note that values of the `pg8000.Interval.microseconds`, `pg8000.Interval.days`,
+and `pg8000.Interval.months` properties are independently measured and cannot
+be converted to each other. A month may be 28, 29, 30, or 31 days, and a day
+may occasionally be lengthened slightly by a leap second.
+
+
+===== pg8000.dbapi.Interval.microseconds
+
+Measure of microseconds in the interval.
+
+The microseconds value is constrained to fit into a signed 64-bit integer. Any
+attempt to set a value too large or too small will result in an OverflowError
+being raised.
+
+
+===== pg8000.dbapi.Interval.days
+
+Measure of days in the interval.
+
+The days value is constrained to fit into a signed 32-bit integer. Any attempt
+to set a value too large or too small will result in an OverflowError being
+raised.
+
+
+===== pg8000.dbapi.Interval.months
+
+Measure of months in the interval.
+
+The months value is constrained to fit into a signed 32-bit integer. Any
+attempt to set a value too large or too small will result in an OverflowError
+being raised.
+
+
+== Tests
+
+* Install http://testrun.org/tox/latest/[tox]: `pip install tox`
+* Enable the PostgreSQL hstore extension by running the SQL command:
+  `create extension hstore;`
+* Add a line to pg_hba.conf for the various authentication options:
+
+....
+host    pg8000_md5           all        127.0.0.1/32            md5
+host    pg8000_gss           all        127.0.0.1/32            gss
+host    pg8000_password      all        127.0.0.1/32            password
+host    pg8000_scram_sha_256 all        127.0.0.1/32            scram-sha-256
+host    all                  all        127.0.0.1/32            trust
+....
+
+* Set password encryption to `scram-sha-256` in `postgresql.conf`:
+  `password_encryption = 'scram-sha-256'`
+* Set the password for the postgres user: `ALTER USER postgres WITH PASSWORD 'pw';`
+* Run `tox` from the `pg8000` directory: `tox`
+
+This will run the tests against the Python version of the virtual environment,
+on the machine, and the installed PostgreSQL version listening on port 5432, or
+the `PGPORT` environment variable if set.
+
+Benchmarks are run as part of the test suite at `tests/test_benchmarks.py`.
+
+
+== Doing A Release Of pg8000
+
+Run `tox` to make sure all tests pass, then update the release notes, then do:
+
+....
+git tag -a x.y.z -m "version x.y.z"
+rm -r build
+rm -r dist
+python setup.py sdist bdist_wheel --python-tag py3
+for f in dist/*; do gpg --detach-sign -a $f; done
+twine upload dist/*
+....
+
+
+== Release Notes
+
+=== Version 1.23.0, 2021-11-13
+
+* If a query has no parameters, then the query will no longer be parsed. Although there
+  are performance benefits for doing this, the main reason is to avoid query rewriting,
+  which can introduce errors.
+
+
+=== Version 1.22.1, 2021-11-10
+
+* Fix bug in PGInterval type where `str()` failed for a millennia value.
+
+
+=== Version 1.22.0, 2021-10-13
+
+* Rather than specifying the oids in the `Parse` step of the Postgres protocol, pg8000
+  now omits them, and so Postgres will use the oids it determines from the query. This
+  makes the pg8000 code simpler and also it should also make the nuances of type
+  matching more straightforward.
+
+
+=== Version 1.21.3, 2021-10-10
+
+* Legacy prepared statement fails if the result is null. Thanks to Carlos
+  https://github.com/carlkid1499 for reporting this.
+
+* For the currency part of the pg8000 test suite, add `C.UTF8` as a supported `LANG`.
+
+
+=== Version 1.21.2, 2021-09-14
+
+* The `executemany()` method fails if the `param_sets` parameter is empty. Thanks to
+  https://github.com/GKTheOne for reporting this.
+
+
+=== Version 1.21.1, 2021-08-25
+
+* Require Scramp version 1.4.1 or higher so that pg8000 can cope with SCRAM with
+  channel binding with certificates using a `sha512` hash algorithm.
+
+
+=== Version 1.21.0, 2021-07-31
+
+* For some SQL statements the server doesn't send back a result set (note that no
+  result set is different from a result set with zero rows). Previously we didn't
+  distinguish between no results and zero rows, but now we do. For `pg8000.dbapi` it
+  means that an exception is raised if `fetchall()` is called when no results have been
+  returned, bringing pg8000 into line with the DBAPI 2 standard. For `pg8000.native`
+  this means that `run()` returns None if there is no result.
+
+
+=== Version 1.20.0, 2021-07-03
+
+* Allow text stream as 'stream' parameter in run(). Previously we only allowed a bytes 
+  stream, but now the stream can be a text stream, in which case pg8000 handles the 
+  encodings.
+
+
+=== Version 1.19.5, 2021-05-18
+
+* Handle invalid character encodings in error strings from the server.
+
+
+=== Version 1.19.4, 2021-05-03
+
+* A FLUSH message should only be send after an extended-query message, but pg8000 was
+  sending it at other times as well. This affected AWS RDS Proxy.
+
+
+=== Version 1.19.3, 2021-04-24
+
+* The type (oid) of integer arrays wasn't being detected correctly. It was only going
+  by the first element, but it should look at all the items. That's fixed now.
+
+
+=== Version 1.19.2, 2021-04-07
+
+* In version 1.19.1 we tried to parse the PostgreSQL `MONEY` type to return a `Decimal`
+  but since the format of `MONEY` is locale-dependent this is too difficult and
+  unreliable and so now we revert to returning a `str`.
+
+
+=== Version 1.19.1, 2021-04-03
+
+* Fix bug where setinputsizes() was only used for the first parameter set of
+  executemany().
+
+* Support more PostgreSQL array types.
+
+
+=== Version 1.19.0, 2021-03-28
+
+* Network error exceptions are now wrapped in an `InterfaceError`, with the
+  original exception as the cause. The error message for network errors always
+  starts with the string `network error`.
+
+* Upgraded to version 1.3.0 of Scramp, which has better error handling.
+
+
+=== Version 1.18.0, 2021-03-06
+
+* The `pg8000.dbapi.Cursor.callproc()` method is now implemented.
+
+* SCRAM channel binding is now supported. That means SCRAM mechanisms ending in
+  '-PLUS' such as SCRAM-SHA-256-PLUS are now supported when connecting to the
+  server.
+
+* A custom attribute `ssl.SSLContext.request_ssl` can be set to `False` to
+  tell pg8000 to connect using an SSL socket, but to not request SSL from
+  the PostgreSQL server. This is useful if you're connecting to a PostgreSQL
+  server that's behind an SSL proxy.
+
+
+=== Version 1.17.0, 2021-01-30
+
+* The API is now split in two, pg8000.native and pg8000.dbapi. The legacy API
+  still exists in this release, but will be removed in another release. The
+  idea is that pg8000.dbapi can stick strictly to the DB-API 2 specification,
+  while pg8000.native can focus on useability without having to worry about
+  compatibility with the DB-API standard.
+
+* The column name in `Connection.description` used to be returned as a
+  `bytes` but now it's returned as a `str`.
+
+* Removed extra wrapper types PGJson, PGEnum etc. These were never properly
+  documented and the problem they solve can be solved using CAST in the SQL or
+  by using setinputsizes.
+
+
+
+=== Version 1.16.6, 2020-10-10
+
+* The column name in `Connection.description` used to be returned as a
+  `bytes` but now it's returned as a `str`.
+
+* Removed extra wrapper types PGJson, PGEnum etc. These were never properly
+  documented and the problem they solve can be solved using CAST in the SQL or
+  by using setinputsizes.
+
+
+=== Version 1.16.5, 2020-08-07
+
+* The TPC method `Connection.tpc_prepare()` was broken.
+
+
+=== Version 1.16.4, 2020-08-03
+
+* Include the `payload` in the tuples in `Connection.notifications`.
+* More constants (eg. `DECIMAL` and `TEXT_ARRAY`) are now available for
+  PostgreSQL types that are used in `setinputsizes()`.
+
+
+=== Version 1.16.3, 2020-07-26
+
+* If an unrecognized parameter is sent to `Cursor.setinputsizes()` use the
+  `pg8000.UNKNOWN` type (705).
+* When communicating with a PostgreSQL server with version < 8.2.0, `FETCH`
+  commands don't have a row count.
+* Include in the source distribution all necessary test files from the `test`
+  directory.
+
+
+=== Version 1.16.2, 2020-07-25
+
+* Use the
+  https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.5.7.4[simple query]
+  cycle for queries that don't have parameters. This should give a performance
+  improvement and also means that multiple statements can be executed in one go
+  (as long as they don't have parameters) whereas previously the `sqlparse` had
+  to be used.
+
+
+=== Version 1.16.1, 2020-07-18
+
+* Enable the `Cursor.setinputsizes()` method. Previously this method didn't
+  do anything. It's an optional method of the DBAPI 2.0 specification.
+
+
+=== Version 1.16.0, 2020-07-11
+
+* This is a backwardly incompatible release of pg8000.
+
+* All data types are now sent as text rather than binary.
+
+* Using adapters, custom types can be plugged in to pg8000.
+
+* Previously, named prepared statements were used for all statements.
+  Now unnamed prepared statements are used by default, and named prepared
+  statements can be used explicitly by calling the Connection.prepare()
+  method, which returns a PreparedStatement object.
+
+
+=== Version 1.15.3, 2020-06-14
+
+* For TCP connections (as opposed to Unix socket connections) the
+  https://docs.python.org/3/library/socket.html#socket.create_connection[`socket.create_connection`]
+  function is now used. This means pg8000 now works with IPv6 as well as IPv4.
+
+* Better error messages for failed connections. A 'cause' exception is now
+  added to the top-level pg8000 exception, and the error message contains the
+  details of what was being connected to (host, port etc.).
+
+
+=== Version 1.15.2, 2020-04-16
+
+* Added a new method `run()` to the connection, which lets you run queries
+  directly without using a `Cursor`. It always uses the `named` parameter
+  style, and the parameters are provided using keyword arguments. There are now
+  two sets of interactive examples, one using the pg8000 extensions, and one
+  using just DB-API features.
+
+* Better error message if certain parameters in the `connect()` function are of
+  the wrong type.
+
+* The constructor of the `Connection` class now has the same signature as the
+  `connect()` function, which makes it easier to use the `Connection` class
+  directly if you want to.
+
+
+=== Version 1.15.1, 2020-04-04
+
+* Up to now the only supported way to create a new connection was to use the
+  `connect()` function. However, some people are using the `Connect` class
+  directly and this change makes it a bit easier to do that by making the class
+  use a constructor which has the same signature as the `connect()` function.
+
+
+=== Version 1.15.0, 2020-04-04
+
+* Abandon the idea of arbitrary `init_params` in the connect() function. We now
+  go back to having a fixed number of arguments. The argument `replication` has
+  been added as this is the only extra init param that was needed. The reason
+  for going back to a fixed number of arguments is that you get better feedback
+  if you accidentally mis-type a parameter name.
+
+* The `max_prepared_statements` parameter has been moved from being a module
+  property to being an argument of the connect() function.
+
+
+=== Version 1.14.1, 2020-03-23
+
+* Ignore any `init_params` that have a value of `None`. This seems to be more
+  useful and the behaviour is more expected.
+
+
+=== Version 1.14.0, 2020-03-21
+
+* Tests are now included in the source distribution.
+
+* Any extra keyword parameters of the `connect()` function are sent as
+  initialization parameters when the PostgreSQL session starts. See the API
+  docs for more information. Thanks to Patrick Hayes for suggesting this.
+
+* The ssl.wrap_socket function is deprecated, so we now give the user the
+  option of using a default `SSLContext` or to pass in a custom one. This is a
+  backwardly incompatible change. See the API docs for more info. Thanks to
+  Jonathan Ross Rogers <jrogers@emphasys-software.com> for his work on this.
+
+* Oversized integers are now returned as a `Decimal` type, whereas before a
+  `None` was returned. Thanks to Igor Kaplounenko <igor.kaplounenko@intel.com>
+  for his work on this.
+
+* Allow setting of connection source address in the `connect()` function. See
+  the API docs for more details. Thanks to David King
+  <davidking@davids-mbp.home> for his work on this.
+
+
+=== Version 1.13.2, 2019-06-30
+
+* Use the https://pypi.org/project/scramp/[Scramp] library for the SCRAM
+  implementation.
+
+* Fixed bug where SQL such as `make_interval(days := 10)` fail on the `:=`
+  part. Thanks to https://github.com/sanepal[sanepal] for reporting this.
+
+
+=== Version 1.13.1, 2019-02-06
+
+* We weren't correctly uploading releases to PyPI, which led to confusion
+  when dropping Python 2 compatibility. Thanks to
+  https://github.com/piroux[Pierre Roux] for his
+  https://github.com/tlocke/pg8000/issues/7[detailed explanation] of what
+  went wrong and how to correct it.
+
+* Fixed bug where references to the `six` library were still in the code, even
+  though we don't use `six` anymore.
+
+
+=== Version 1.13.0, 2019-02-01
+
+* Remove support for Python 2.
+
+* Support the scram-sha-256 authentication protocol. Reading through the
+  https://github.com/cagdass/scrampy code was a great help in implementing
+  this, so thanks to https://github.com/cagdass[cagdass] for his code.
+
+
+=== Version 1.12.4, 2019-01-05
+
+* Support the PostgreSQL cast operator `::` in SQL statements.
+
+* Added support for more advanced SSL options. See docs on `connect` function
+  for more details.
+
+* TCP keepalives enabled by default, can be set in the `connect` function.
+
+* Fixed bug in array dimension calculation.
+
+* Can now use the `with` keyword with connection objects.
+
+
+=== Version 1.12.3, 2018-08-22
+
+* Make PGVarchar and PGText inherit from `str`. Simpler than inheriting from
+  a PGType.
+
+
+=== Version 1.12.2, 2018-06-28
+
+* Add PGVarchar and PGText wrapper types. This allows fine control over the
+  string type that is sent to PostgreSQL by pg8000.
+
+
+=== Version 1.12.1, 2018-06-12
+
+
+* Revert back to the Python 3 `str` type being sent as an `unknown` type,
+  rather than the `text` type as it was in the previous release. The reason is
+  that with the `unknown` type there's the convenience of using a plain Python
+  string for JSON, Enum etc. There's always the option of using the
+  `pg8000.PGJson` and `pg8000.PGEnum` wrappers if precise control over the
+  PostgreSQL type is needed.
+
+
+=== Version 1.12.0, 2018-06-12
+
+Note that this version is not backward compatible with previous versions.
+
+* The Python 3 `str` type was sent as an `unknown` type, but now it's sent as
+ the nearest PostgreSQL type `text`.
+
+* pg8000 now recognizes that inline SQL comments end with a newline.
+
+* Single `%` characters now allowed in SQL comments.
+
+* The wrappers `pg8000.PGJson`, `pg8000.PGJsonb` and `pg8000.PGTsvector` can
+  now be used to contain Python values to be used as parameters. The wrapper
+  `pg8000.PGEnum` can be used for Python 2, as it doesn't have a standard
+  `enum.Enum` type.
+
+
+=== Version 1.11.0, 2017-08-16
+
+Note that this version is not backward compatible with previous versions.
+
+* The Python `int` type was sent as an `unknown` type, but now it's sent as the
+  nearest matching PostgreSQL type. Thanks to Patrick Hayes.
+
+* Prepared statements are now closed on the server when pg8000 clears them from
+  its cache.
+
+* Previously a `%` within an SQL literal had to be escaped, but this is no
+  longer the case.
+
+* Notifications, notices and parameter statuses are now handled by simple
+  `dequeue` buffers. See docs for more details.
+
+* Connections and cursors are no longer threadsafe. So to be clear, neither
+  connections or cursors should be shared between threads. One thread per
+  connection is mandatory now. This has been done for performance reasons, and
+  to simplify the code.
+
+* Rather than reading results from the server in batches, pg8000 now always
+  downloads them in one go. This avoids `portal closed` errors and makes things
+  a bit quicker, but now one has to avoid downloading too many rows in a single
+  query.
+
+* Attempts to return something informative if the returned PostgreSQL timestamp
+  value is outside the range of the Python datetime.
+
+* Allow empty arrays as parameters, assume they're of string type.
+
+* The cursor now has a context manager, so it can be used with the `with`
+  keyword. Thanks to Ildar Musin.
+
+* Add support for `application_name` parameter when connecting to database,
+  issue https://github.com/mfenniak/pg8000/pull/106[#106]. Thanks to
+  https://github.com/vadv[@vadv] for the contribution.
+
+* Fix warnings from PostgreSQL "not in a transaction", when calling
+  ``.rollback()`` while not in a transaction, issue
+  https://github.com/mfenniak/pg8000/issues/113[#113]. Thanks to
+  https://github.com/jamadden[@jamadden] for the contribution.
+
+* Errors from the server are now always passed through in full.
+
+
+=== Version 1.10.6, 2016-06-10
+
+* Fixed a problem where we weren't handling the password connection parameter
+  correctly. Now it's handled in the same way as the 'user' and 'database'
+  parameters, ie. if the password is bytes, then pass it straight through to the
+  database, if it's a string then encode it with utf8.
+
+* It used to be that if the 'user' parameter to the connection function was
+  'None', then pg8000 would try and look at environment variables to find a
+  username. Now we just go by the 'user' parameter only, and give an error if
+  it's None.
+
+
+=== Version 1.10.5, 2016-03-04
+
+- Include LICENCE text and sources for docs in the source distribution (the
+  tarball).
+
+
+=== Version 1.10.4, 2016-02-27
+
+* Fixed bug where if a str is sent as a query parameter, and then with the same
+  cursor an int is sent instead of a string, for the same query, then it fails.
+
+* Under Python 2, a str type is now sent 'as is', ie. as a byte string rather
+  than trying to decode and send according to the client encoding. Under Python
+  2 it's recommended to send text as unicode() objects.
+
+* Dropped and added support for Python versions. Now pg8000 supports
+  Python 2.7+ and Python 3.3+.
+
+* Dropped and added support for PostgreSQL versions. Now pg8000 supports
+  PostgreSQL 9.1+.
+
+* pg8000 uses the 'six' library for making the same code run on both Python 2
+  and Python 3. We used to include it as a file in the pg8000 source code. Now
+  we have it as a separate dependency that's installed with 'pip install'. The
+  reason for doing this is that package maintainers for OS distributions
+  prefer unbundled libraries.
+
+
+=== Version 1.10.3, 2016-01-07
+
+* Removed testing for PostgreSQL 9.0 as it's no longer supported by the
+  PostgreSQL Global Development Group.
+* Fixed bug where pg8000 would fail with datetimes if PostgreSQL was compiled
+  with the integer_datetimes option set to 'off'. The bug was in the
+  timestamp_send_float function.
+
+
+=== Version 1.10.2, 2015-03-17
+
+* If there's a socket exception thrown when communicating with the database,
+  it is now wrapped in an OperationalError exception, to conform to the DB-API
+  spec.
+
+* Previously, pg8000 didn't recognize the EmptyQueryResponse (that the server
+  sends back if the SQL query is an empty string) now we raise a
+  ProgrammingError exception.
+
+* Added socket timeout option for Python 3.
+
+* If the server returns an error, we used to initialize the ProgrammerException
+  with just the first three fields of the error. Now we initialize the
+  ProgrammerException with all the fields.
+
+* Use relative imports inside package.
+
+* User and database names given as bytes. The user and database parameters of
+  the connect() function are now passed directly as bytes to the server. If the
+  type of the parameter is unicode, pg8000 converts it to bytes using the utf8
+  encoding.
+
+* Added support for JSON and JSONB Postgres types. We take the approach of
+  taking serialized JSON (str) as an SQL parameter, but returning results as
+  de-serialized JSON (Python objects). See the example in the Quickstart.
+
+* Added CircleCI continuous integration.
+
+* String support in arrays now allows letters like "u", braces and whitespace.
+
+
+=== Version 1.10.1, 2014-09-15
+
+* Add support for the Wheel package format.
+
+* Remove option to set a connection timeout. For communicating with the server,
+  pg8000 uses a file-like object using socket.makefile() but you can't use this
+  if the underlying socket has a timeout.
+
+
+=== Version 1.10.0, 2014-08-30
+
+* Remove the old ``pg8000.dbapi`` and ``pg8000.DBAPI`` namespaces. For example,
+  now only ``pg8000.connect()`` will work, and ``pg8000.dbapi.connect()``
+  won't work any more.
+
+* Parse server version string with LooseVersion. This should solve the problems
+  that people have been having when using versions of PostgreSQL such as
+  ``9.4beta2``.
+
+* Message if portal suspended in autocommit. Give a proper error message if the
+  portal is suspended while in autocommit mode. The error is that the portal is
+  closed when the transaction is closed, and so in autocommit mode the portal
+  will be immediately closed. The bottom line is, don't use autocommit mode if
+  there's a chance of retrieving more rows than the cache holds (currently 100).
+
+
+=== Version 1.9.14, 2014-08-02
+
+* Make ``executemany()`` set ``rowcount``. Previously, ``executemany()`` would
+  always set ``rowcount`` to -1. Now we set it to a meaningful value if
+  possible. If any of the statements have a -1 ``rowcount`` then the
+  ``rowcount`` for the ``executemany()`` is -1, otherwise the ``executemany()``
+  ``rowcount`` is the sum of the rowcounts of the individual statements.
+
+* Support for password authentication. pg8000 didn't support plain text
+  authentication, now it does.
+
+
+=== Version 1.9.13, 2014-07-27
+
+* Reverted to using the string ``connection is closed`` as the message of the
+  exception that's thrown if a connection is closed. For a few versions we were
+  using a slightly different one with capitalization and punctuation, but we've
+  reverted to the original because it's easier for users of the library to
+  consume.
+
+* Previously, ``tpc_recover()`` would start a transaction if one was not already
+  in progress. Now it won't.
+
+
+=== Version 1.9.12, 2014-07-22
+
+* Fixed bug in ``tpc_commit()`` where a single phase commit failed.
+
+
+=== Version 1.9.11, 2014-07-20
+
+* Add support for two-phase commit DBAPI extension. Thanks to Mariano Reingart's
+  TPC code on the Google Code version:
+
+  https://code.google.com/p/pg8000/source/detail?r=c8609701b348b1812c418e2c7
+
+  on which the code for this commit is based.
+
+* Deprecate ``copy_from()`` and ``copy_to()`` The methods ``copy_from()`` and
+  ``copy_to()`` of the ``Cursor`` object are deprecated because it's simpler and
+  more flexible to use the ``execute()`` method with a ``fileobj`` parameter.
+
+* Fixed bug in reporting unsupported authentication codes. Thanks to
+  https://github.com/hackgnar for reporting this and providing the fix.
+
+* Have a default for the ``user`` parameter of the ``connect()`` function. If
+  the ``user`` parameter of the ``connect()`` function isn't provided, look
+  first for the ``PGUSER`` then the ``USER`` environment variables. Thanks to
+  Alex Gaynor https://github.com/alex for this suggestion.
+
+* Before PostgreSQL 8.2, ``COPY`` didn't give row count. Until PostgreSQL 8.2
+  (which includes Amazon Redshift which forked at 8.0) the ``COPY`` command
+  didn't return a row count, but pg8000 thought it did. That's fixed now.
+
+
+=== Version 1.9.10, 2014-06-08
+
+* Remember prepared statements. Now prepared statements are never closed, and
+  pg8000 remembers which ones are on the server, and uses them when a query is
+  repeated. This gives an increase in performance, because on subsequent
+  queries the prepared statement doesn't need to be created each time.
+
+* For performance reasons, pg8000 never closed portals explicitly, it just
+  let the server close them at the end of the transaction. However, this can
+  cause memory problems for long running transactions, so now pg8000 always
+  closes a portal after it's exhausted.
+
+* Fixed bug where unicode arrays failed under Python 2. Thanks to
+  https://github.com/jdkx for reporting this.
+
+* A FLUSH message is now sent after every message (except SYNC). This is in
+  accordance with the protocol docs, and ensures the server sends back its
+  responses straight away.
+
+
+=== Version 1.9.9, 2014-05-12
+
+* The PostgreSQL interval type is now mapped to datetime.timedelta where
+  possible. Previously the PostgreSQL interval type was always mapped to the
+  pg8000.Interval type. However, to support the datetime.timedelta type we
+  now use it whenever possible. Unfortunately it's not always possible because
+  timedelta doesn't support months. If months are needed then the fall-back
+  is the pg8000.Interval type. This approach means we handle timedelta in a
+  similar way to other Python PostgreSQL drivers, and it makes pg8000
+  compatible with popular ORMs like SQLAlchemy.
+
+* Fixed bug in executemany() where a new prepared statement should be created
+  for each variation in the oids of the parameter sets.
+
+
+=== Version 1.9.8, 2014-05-05
+
+* We used to ask the server for a description of the statement, and then ask
+  for a description of each subsequent portal. We now only ask for a
+  description of the statement. This results in a significant performance
+  improvement, especially for executemany() calls and when using the
+  'use_cache' option of the connect() function.
+
+* Fixed warning in Python 3.4 which was saying that a socket hadn't been
+  closed. It seems that closing a socket file doesn't close the underlying
+  socket.
+
+* Now should cope with PostgreSQL 8 versions before 8.4. This includes Amazon
+  Redshift.
+
+* Added 'unicode' alias for 'utf-8', which is needed for Amazon Redshift.
+
+* Various other bug fixes.
+
+
+=== Version 1.9.7, 2014-03-26
+
+* Caching of prepared statements. There's now a 'use_cache' boolean parameter
+  for the connect() function, which causes all prepared statements to be cached
+  by pg8000, keyed on the SQL query string. This should speed things up
+  significantly in most cases.
+
+* Added support for the PostgreSQL inet type. It maps to the Python types
+  IPv*Address and IPv*Network.
+
+* Added support for PostgreSQL +/- infinity date and timestamp values. Now the
+  Python value datetime.datetime.max maps to the PostgreSQL value 'infinity'
+  and datetime.datetime.min maps to '-infinity', and the same for
+  datetime.date.
+
+* Added support for the PostgreSQL types int2vector and xid, which are mostly
+  used internally by PostgreSQL.
+
+
+=== Version 1.9.6, 2014-02-26
+
+* Fixed a bug where 'portal does not exist' errors were being generated. Some
+  queries that should have been run in a transaction were run in autocommit
+  mode and so any that suspended a portal had the portal immediately closed,
+  because a portal can only exist within a transaction. This has been solved by
+  determining the transaction status from the READY_FOR_QUERY message.
+
+
+=== Version 1.9.5, 2014-02-15
+
+* Removed warn() calls for __next__() and __iter__(). Removing the warn() in
+  __next__() improves the performance tests by ~20%.
+
+* Increased performance of timestamp by ~20%. Should also improve timestamptz.
+
+* Moved statement_number and portal_number from module to Connection. This
+  should reduce lock contention for cases where there's a single module and
+  lots of connections.
+
+* Make decimal_out/in and time_in use client_encoding. These functions used to
+  assume ascii, and I can't think of a case where that wouldn't work.
+  Nonetheless, that theoretical bug is now fixed.
+
+* Fixed a bug in cursor.executemany(), where a non-None parameter in a sequence
+  of parameters, is None in a subsequent sequence of parameters.
+
+
+=== Version 1.9.4, 2014-01-18
+
+* Fixed a bug where with Python 2, a parameter with the value Decimal('12.44'),
+  (and probably other numbers) isn't sent correctly to PostgreSQL, and so the
+  command fails. This has been fixed by sending decimal types as text rather
+  than binary. I'd imagine it's slightly faster too.
+
+
+=== Version 1.9.3, 2014-01-16
+
+* Fixed bug where there were missing trailing zeros after the decimal point in
+  the NUMERIC type. For example, the NUMERIC value 1.0 was returned as 1 (with
+  no zero after the decimal point).
+
+  This is fixed this by making pg8000 use the text rather than binary
+  representation for the numeric type. This actually doubles the speed of
+  numeric queries.
+
+
+=== Version 1.9.2, 2013-12-17
+
+* Fixed incompatibility with PostgreSQL 8.4. In 8.4, the CommandComplete
+  message doesn't return a row count if the command is SELECT. We now look at
+  the server version and don't look for a row count for a SELECT with version
+  8.4.
+
+
+=== Version 1.9.1, 2013-12-15
+
+* Fixed bug where the Python 2 'unicode' type wasn't recognized in a query
+  parameter.
+
+
+=== Version 1.9.0, 2013-12-01
+
+* For Python 3, the :class:`bytes` type replaces the :class:`pg8000.Bytea`
+  type. For backward compatibility the :class:`pg8000.Bytea` still works under
+  Python 3, but its use is deprecated.
+
+* A single codebase for Python 2 and 3.
+
+* Everything (functions, properties, classes) is now available under the
+  ``pg8000`` namespace. So for example:
+
+  * pg8000.DBAPI.connect() -> pg8000.connect()
+  * pg8000.DBAPI.apilevel -> pg8000.apilevel
+  * pg8000.DBAPI.threadsafety -> pg8000.threadsafety
+  * pg8000.DBAPI.paramstyle -> pg8000.paramstyle
+  * pg8000.types.Bytea -> pg8000.Bytea
+  * pg8000.types.Interval -> pg8000.Interval
+  * pg8000.errors.Warning -> pg8000.Warning
+  * pg8000.errors.Error -> pg8000.Error
+  * pg8000.errors.InterfaceError -> pg8000.InterfaceError
+  * pg8000.errors.DatabaseError -> pg8000.DatabaseError
+
+  The old locations are deprecated, but still work for backward compatibility.
+
+* Lots of performance improvements.
+
+  * Faster receiving of ``numeric`` types.
+  * Query only parsed when PreparedStatement is created.
+  * PreparedStatement re-used in executemany()
+  * Use ``collections.deque`` rather than ``list`` for the row cache. We're
+    adding to one end and removing from the other. This is O(n) for a list but
+    O(1) for a deque.
+  * Find the conversion function and do the format code check in the
+    ROW_DESCRIPTION handler, rather than every time in the ROW_DATA handler.
+  * Use the 'unpack_from' form of struct, when unpacking the data row, so we
+    don't have to slice the data.
+  * Return row as a list for better performance. At the moment result rows are
+    turned into a tuple before being returned. Returning the rows directly as a
+    list speeds up the performance tests about 5%.
+  * Simplify the event loop. Now the main event loop just continues until a
+    READY_FOR_QUERY message is received. This follows the suggestion in the
+    Postgres protocol docs. There's not much of a difference in speed, but the
+    code is a bit simpler, and it should make things more robust.
+  * Re-arrange the code as a state machine to give > 30% speedup.
+  * Using pre-compiled struct objects. Pre-compiled struct objects are a bit
+    faster than using the struct functions directly. It also hopefully adds to
+    the readability of the code.
+  * Speeded up _send. Before calling the socket 'write' method, we were
+    checking that the 'data' type implements the 'buffer' interface (bytes or
+    bytearray), but the check isn't needed because 'write' raises an exception
+    if data is of the wrong type.
+
+
+* Add facility for turning auto-commit on. This follows the suggestion of
+  funkybob to fix the problem of not being able to execute a command such as
+  'create database' that must be executed outside a transaction. Now you can do
+  conn.autocommit = True and then execute 'create database'.
+
+* Add support for the PostgreSQL ``uid`` type. Thanks to Rad Cirskis.
+
+* Add support for the PostgreSQL XML type.
+
+* Add support for the PostgreSQL ``enum`` user defined types.
+
+* Fix a socket leak, where a problem opening a connection could leave a socket
+  open.
+
+* Fix empty array issue. https://github.com/mfenniak/pg8000/issues/10
+
+* Fix scale on ``numeric`` types. https://github.com/mfenniak/pg8000/pull/13
+
+* Fix numeric_send. Thanks to Christian Hofstaedtler.
+
+
+=== Version 1.08, 2010-06-08
+
+* Removed usage of deprecated :mod:`md5` module, replaced with :mod:`hashlib`.
+  Thanks to Gavin Sherry for the patch.
+
+* Start transactions on execute or executemany, rather than immediately at the
+  end of previous transaction.  Thanks to Ben Moran for the patch.
+
+* Add encoding lookups where needed, to address usage of SQL_ASCII encoding.
+  Thanks to Benjamin Schweizer for the patch.
+
+* Remove record type cache SQL query on every new pg8000 connection.
+
+* Fix and test SSL connections.
+
+* Handle out-of-band messages during authentication.
+
+
+=== Version 1.07, 2009-01-06
+
+* Added support for :meth:`~pg8000.dbapi.CursorWrapper.copy_to` and
+  :meth:`~pg8000.dbapi.CursorWrapper.copy_from` methods on cursor objects, to
+  allow the usage of the PostgreSQL COPY queries.  Thanks to Bob Ippolito for
+  the original patch.
+
+* Added the :attr:`~pg8000.dbapi.ConnectionWrapper.notifies` and
+  :attr:`~pg8000.dbapi.ConnectionWrapper.notifies_lock` attributes to DBAPI
+  connection objects to provide access to server-side event notifications.
+  Thanks again to Bob Ippolito for the original patch.
+
+* Improved performance using buffered socket I/O.
+
+* Added valid range checks for :class:`~pg8000.types.Interval` attributes.
+
+* Added binary transmission of :class:`~decimal.Decimal` values.  This permits
+  full support for NUMERIC[] types, both send and receive.
+
+* New `Sphinx <http://sphinx.pocoo.org/>`_-based website and documentation.
+
+
+=== Version 1.06, 2008-12-09
+
+* pg8000-py3: a branch of pg8000 fully supporting Python 3.0.
+
+* New Sphinx-based documentation.
+
+* Support for PostgreSQL array types -- INT2[], INT4[], INT8[], FLOAT[],
+  DOUBLE[], BOOL[], and TEXT[].  New support permits both sending and
+  receiving these values.
+
+* Limited support for receiving RECORD types.  If a record type is received,
+  it will be translated into a Python dict object.
+
+* Fixed potential threading bug where the socket lock could be lost during
+  error handling.
+
+
+=== Version 1.05, 2008-09-03
+
+* Proper support for timestamptz field type:
+
+  * Reading a timestamptz field results in a datetime.datetime instance that
+    has a valid tzinfo property.  tzinfo is always UTC.
+
+  * Sending a datetime.datetime instance with a tzinfo value will be
+    sent as a timestamptz type, with the appropriate tz conversions done.
+
+* Map postgres < -- > python text encodings correctly.
+
+* Fix bug where underscores were not permitted in pyformat names.
+
+* Support "%s" in a pyformat string.
+
+* Add cursor.connection DB-API extension.
+
+* Add cursor.next and cursor.__iter__ DB-API extensions.
+
+* DBAPI documentation improvements.
+
+* Don't attempt rollback in cursor.execute if a ConnectionClosedError occurs.
+
+* Add warning for accessing exceptions as attributes on the connection object,
+  as per DB-API spec.
+
+* Fix up open connection when an unexpected connection occurs, rather than
+  leaving the connection in an unusable state.
+
+* Use setuptools/egg package format.
+
+
+=== Version 1.04, 2008-05-12
+
+* DBAPI 2.0 compatibility:
+
+  * rowcount returns rows affected when appropriate (eg. UPDATE, DELETE)
+
+  * Fix CursorWrapper.description to return a 7 element tuple, as per spec.
+
+  * Fix CursorWrapper.rowcount when using executemany.
+
+  * Fix CursorWrapper.fetchmany to return an empty sequence when no more
+    results are available.
+
+  * Add access to DBAPI exceptions through connection properties.
+
+  * Raise exception on closing a closed connection.
+
+  * Change DBAPI.STRING to varchar type.
+
+  * rowcount returns -1 when appropriate.
+
+  * DBAPI implementation now passes Stuart Bishop's Python DB API 2.0 Anal
+    Compliance Unit Test.
+
+* Make interface.Cursor class use unnamed prepared statement that binds to
+  parameter value types.  This change increases the accuracy of PG's query
+  plans by including parameter information, hence increasing performance in
+  some scenarios.
+
+* Raise exception when reading from a cursor without a result set.
+
+* Fix bug where a parse error may have rendered a connection unusable.
+
+
+=== Version 1.03, 2008-05-09
+
+* Separate pg8000.py into multiple python modules within the pg8000 package.
+  There should be no need for a client to change how pg8000 is imported.
+
+* Fix bug in row_description property when query has not been completed.
+
+* Fix bug in fetchmany dbapi method that did not properly deal with the end of
+  result sets.
+
+* Add close methods to DB connections.
+
+* Add callback event handlers for server notices, notifications, and runtime
+  configuration changes.
+
+* Add boolean type output.
+
+* Add date, time, and timestamp types in/out.
+
+* Add recognition of "SQL_ASCII" client encoding, which maps to Python's
+  "ascii" encoding.
+
+* Add types.Interval class to represent PostgreSQL's interval data type, and
+  appropriate wire send/receive methods.
+
+* Remove unused type conversion methods.
+
+
+=== Version 1.02, 2007-03-13
+
+* Add complete DB-API 2.0 interface.
+
+* Add basic SSL support via ssl connect bool.
+
+* Rewrite pg8000_test.py to use Python's unittest library.
+
+* Add bytea type support.
+
+* Add support for parameter output types: NULL value, timestamp value, python
+  long value.
+
+* Add support for input parameter type oid.
+
+
+=== Version 1.01, 2007-03-09
+
+* Add support for writing floats and decimal objs up to PG backend.
+
+* Add new error handling code and tests to make sure connection can recover
+  from a database error.
+
+* Fixed bug where timestamp types were not always returned in the same binary
+  format from the PG backend.  Text format is now being used to send
+  timestamps.
+
+* Fixed bug where large packets from the server were not being read fully, due
+  to socket.read not always returning full read size requested.  It was a
+  lazy-coding bug.
+
+* Added locks to make most of the library thread-safe.
+
+* Added UNIX socket support.
+
+
+=== Version 1.00, 2007-03-08
+
+* First public release.  Although fully functional, this release is mostly
+  lacking in production testing and in type support.
diff -pruN 1.10.6-3/README.creole 1.23.0-0ubuntu1/README.creole
--- 1.10.6-3/README.creole	2016-05-24 21:22:23.000000000 +0000
+++ 1.23.0-0ubuntu1/README.creole	1970-01-01 00:00:00.000000000 +0000
@@ -1,108 +0,0 @@
-=pg8000
-
-pg8000 is a pure-[[http://www.python.org/|Python]]
-[[http://www.postgresql.org/|PostgreSQL]] driver that complies with
-[[http://www.python.org/dev/peps/pep-0249/|DB-API 2.0]]. The driver
-communicates with the database using the
-[[http://www.postgresql.org/docs/current/static/protocol.html|PostgreSQL Frontend/Backend Protocol]].
-
-CircleCI [[https://circleci.com/gh/mfenniak/pg8000|Build Status]]:
- {{https://circleci.com/gh/mfenniak/pg8000.png?style=badge|CircleCI}}
-
-Links:
-
-* [[http://pythonhosted.org/pg8000/|User Docs]]
-* [[https://groups.google.com/forum/#!forum/pg8000|Forum]]
-* [[https://github.com/mfenniak/pg8000|Code, bugs, feature requests etc.]]
-
-=Regression Tests
-
-To run the regression tests, install [[http://testrun.org/tox/latest/|tox]]:
-
-{{{
-pip install tox
-}}}
-
-then install all the supported Python versions (using the
-[[https://launchpad.net/~fkrull/+archive/ubuntu/deadsnakes|APT Repository]] if
-you're using Ubuntu). Install all the currently supported versions of PostgreSQL
-(using the [[http://wiki.postgresql.org/wiki/Apt|APT Repository]] if you're
-using Ubuntu). Then for each of them, enable the hstore extension by running the
-SQL command:
-
-{{{
-create extension hstore;
-}}}
-
-and add a line to pg_hba.conf for the various authentication options, eg.
-
-{{{
-host    pg8000_md5      all             127.0.0.1/32            md5
-host    pg8000_gss      all             127.0.0.1/32            gss
-host    pg8000_password all             127.0.0.1/32            password
-host    all             all             127.0.0.1/32            trust
-}}}
-
-Set the following environment variables for the databases, for example:
-
-{{{
-export PG8000_TEST_NAME="PG8000_TEST_9_5"
-export PG8000_TEST_9_1="{'user': 'postgres', 'password': 'pw', 'port': 5435}"
-export PG8000_TEST_9_2="{'user': 'postgres', 'password': 'pw', 'port': 5434}"
-export PG8000_TEST_9_3="{'user': 'postgres', 'password': 'pw', 'port': 5433}"
-export PG8000_TEST_9_4="{'user': 'postgres', 'password': 'pw', 'port': 5432}"
-export PG8000_TEST_9_5="{'user': 'postgres', 'password': 'pw', 'port': 5431}"
-}}}
-
-then run {{{tox}}} from the {{{pg8000}}} directory:
-
-{{{
-tox
-}}}
-
-
-==Performance Tests
-
-To run the performance tests from the {{{pg8000}}} directory:
-
-{{{
-python -m pg8000.tests.performance
-}}}
-
-==Stress Test
-
-There's a stress test that is run by doing:
-
-{{{
-python ./multi
-}}}
-
-The idea is to set {{{shared_buffers}}} in postgresql.conf to 128kB, and then
-run the stress test, and you should get {{{no unpinned buffers}}} errors.
-
-=Building The Documentation
-
-The docs are written using [[http://sphinx-doc.org/|Sphinx]]. To build them,
-install sphinx:
-
-{{{
-pip install sphinx
-}}}
-
-Then type:
-
-{{{
-python setup.py build_sphinx
-}}}
-
-and the docs will appear in {{{build/sphinx/html}}}.
-
-=Doing A Release Of pg8000
-
-Run {{{tox}}} to make sure all tests pass, then update
-{{{doc/release_notes.rst}}} then do:
-
-{{{
-git tag -a x.y.z -m "version x.y.z"
-python setup.py register sdist bdist_wheel upload build_sphinx upload_docs
-}}}
diff -pruN 1.10.6-3/setup.cfg 1.23.0-0ubuntu1/setup.cfg
--- 1.10.6-3/setup.cfg	2016-06-10 10:37:25.000000000 +0000
+++ 1.23.0-0ubuntu1/setup.cfg	2021-11-13 10:21:00.416994000 +0000
@@ -1,9 +1,3 @@
-[upload_docs]
-upload-dir = build/sphinx/html
-
-[bdist_wheel]
-universal = 1
-
 [versioneer]
 vcs = git
 style = pep440
@@ -12,8 +6,38 @@ versionfile_build = pg8000/_version.py
 tag_prefix = 
 parentdir_prefix = pg8000-
 
+[tox:tox]
+
+[testenv]
+passenv = PGPORT
+commands = 
+	black --check .
+	flake8 .
+	python -m pytest -x test
+	python setup.py check
+deps = 
+	pytest
+	pytest-mock
+	pytest-benchmark
+	black
+	flake8
+	flake8-alphabetize
+	pytz
+
+[testenv:py38]
+setenv = 
+	USER = postgres
+commands_post = 
+	python -m doctest -o ELLIPSIS README.adoc
+
+[flake8]
+application-names = pg8000
+ignore = E203,W503
+max-line-length = 88
+exclude = .git,__pycache__,build,dist,venv,.tox
+application-import-names = pg8000
+
 [egg_info]
 tag_build = 
 tag_date = 0
-tag_svn_revision = 0
 
diff -pruN 1.10.6-3/setup.py 1.23.0-0ubuntu1/setup.py
--- 1.10.6-3/setup.py	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/setup.py	2021-10-30 14:12:42.000000000 +0000
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 
-import versioneer
 from setuptools import setup
 
+import versioneer
+
 long_description = """\
 
 pg8000
@@ -21,12 +22,6 @@ PostgreSQL interface for Python."""
 cmdclass = dict(versioneer.get_cmdclass())
 version = versioneer.get_version()
 
-try:
-    from sphinx.setup_command import BuildDoc
-    cmdclass['build_sphinx'] = BuildDoc
-except ImportError:
-    pass
-
 setup(
     name="pg8000",
     version=version,
@@ -35,22 +30,21 @@ setup(
     long_description=long_description,
     author="Mathieu Fenniak",
     author_email="biziqe@mathieu.fenniak.net",
-    url="https://github.com/mfenniak/pg8000",
+    url="https://github.com/tlocke/pg8000",
     license="BSD",
-    install_requires = [
-        "six>=1.10.0",
-    ],
+    python_requires=">=3.6",
+    install_requires=["scramp>=1.4.1"],
     classifiers=[
-        "Development Status :: 4 - Beta",
+        "Development Status :: 5 - Production/Stable",
         "Intended Audience :: Developers",
         "License :: OSI Approved :: BSD License",
         "Programming Language :: Python",
-        "Programming Language :: Python :: 2",
-        "Programming Language :: Python :: 2.7",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.3",
-        "Programming Language :: Python :: 3.4",
-        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
         "Programming Language :: Python :: Implementation",
         "Programming Language :: Python :: Implementation :: CPython",
         "Programming Language :: Python :: Implementation :: Jython",
@@ -61,8 +55,4 @@ setup(
     ],
     keywords="postgresql dbapi",
     packages=("pg8000",),
-    command_options={
-        'build_sphinx': {
-            'version': ('setup.py', version),
-            'release': ('setup.py', version)}},
 )
diff -pruN 1.10.6-3/test/dbapi/conftest.py 1.23.0-0ubuntu1/test/dbapi/conftest.py
--- 1.10.6-3/test/dbapi/conftest.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/conftest.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,58 @@
+import sys
+from os import environ
+
+import pytest
+
+import pg8000.dbapi
+
+
+@pytest.fixture(scope="class")
+def db_kwargs():
+    db_connect = {"user": "postgres", "password": "pw"}
+
+    for kw, var, f in [
+        ("host", "PGHOST", str),
+        ("password", "PGPASSWORD", str),
+        ("port", "PGPORT", int),
+    ]:
+        try:
+            db_connect[kw] = f(environ[var])
+        except KeyError:
+            pass
+
+    return db_connect
+
+
+@pytest.fixture
+def con(request, db_kwargs):
+    conn = pg8000.dbapi.connect(**db_kwargs)
+
+    def fin():
+        try:
+            conn.rollback()
+        except pg8000.dbapi.InterfaceError:
+            pass
+
+        try:
+            conn.close()
+        except pg8000.dbapi.InterfaceError:
+            pass
+
+    request.addfinalizer(fin)
+    return conn
+
+
+@pytest.fixture
+def cursor(request, con):
+    cursor = con.cursor()
+
+    def fin():
+        cursor.close()
+
+    request.addfinalizer(fin)
+    return cursor
+
+
+@pytest.fixture
+def is_java():
+    return "java" in sys.platform.lower()
diff -pruN 1.10.6-3/test/dbapi/github-actions/gss_dbapi.py 1.23.0-0ubuntu1/test/dbapi/github-actions/gss_dbapi.py
--- 1.10.6-3/test/dbapi/github-actions/gss_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/github-actions/gss_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,14 @@
+import pytest
+
+import pg8000.dbapi
+
+
+def test_gss(db_kwargs):
+    """Called by GitHub Actions with auth method gss."""
+
+    # Should raise an exception saying gss isn't supported
+    with pytest.raises(
+        pg8000.dbapi.InterfaceError,
+        match="Authentication method 7 not supported by pg8000.",
+    ):
+        pg8000.dbapi.connect(**db_kwargs)
diff -pruN 1.10.6-3/test/dbapi/github-actions/md5_dbapi.py 1.23.0-0ubuntu1/test/dbapi/github-actions/md5_dbapi.py
--- 1.10.6-3/test/dbapi/github-actions/md5_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/github-actions/md5_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_md5(con):
+    """Called by GitHub Actions with auth method md5.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/dbapi/github-actions/password_dbapi.py 1.23.0-0ubuntu1/test/dbapi/github-actions/password_dbapi.py
--- 1.10.6-3/test/dbapi/github-actions/password_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/github-actions/password_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_password(con):
+    """Called by GitHub Actions with auth method password.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/dbapi/github-actions/scram-sha-256_dbapi.py 1.23.0-0ubuntu1/test/dbapi/github-actions/scram-sha-256_dbapi.py
--- 1.10.6-3/test/dbapi/github-actions/scram-sha-256_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/github-actions/scram-sha-256_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_scram_sha_256(con):
+    """Called by GitHub Actions with auth method scram-sha-256.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/dbapi/github-actions/ssl_dbapi.py 1.23.0-0ubuntu1/test/dbapi/github-actions/ssl_dbapi.py
--- 1.10.6-3/test/dbapi/github-actions/ssl_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/github-actions/ssl_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,63 @@
+import ssl
+import sys
+
+import pytest
+
+import pg8000.dbapi
+
+
+# Check if running in Jython
+if "java" in sys.platform:
+    from javax.net.ssl import TrustManager, X509TrustManager
+    from jarray import array
+    from javax.net.ssl import SSLContext
+
+    class TrustAllX509TrustManager(X509TrustManager):
+        """Define a custom TrustManager which will blindly accept all
+        certificates"""
+
+        def checkClientTrusted(self, chain, auth):
+            pass
+
+        def checkServerTrusted(self, chain, auth):
+            pass
+
+        def getAcceptedIssuers(self):
+            return None
+
+    # Create a static reference to an SSLContext which will use
+    # our custom TrustManager
+    trust_managers = array([TrustAllX509TrustManager()], TrustManager)
+    TRUST_ALL_CONTEXT = SSLContext.getInstance("SSL")
+    TRUST_ALL_CONTEXT.init(None, trust_managers, None)
+    # Keep a static reference to the JVM's default SSLContext for restoring
+    # at a later time
+    DEFAULT_CONTEXT = SSLContext.getDefault()
+
+
+@pytest.fixture
+def trust_all_certificates(request):
+    """Decorator function that will make it so the context of the decorated
+    method will run with our TrustManager that accepts all certificates"""
+    # Only do this if running under Jython
+    is_java = "java" in sys.platform
+
+    if is_java:
+        from javax.net.ssl import SSLContext
+
+        SSLContext.setDefault(TRUST_ALL_CONTEXT)
+
+    def fin():
+        if is_java:
+            SSLContext.setDefault(DEFAULT_CONTEXT)
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.usefixtures("trust_all_certificates")
+def test_ssl(db_kwargs):
+    context = ssl.SSLContext()
+    context.check_hostname = False
+    db_kwargs["ssl_context"] = context
+    with pg8000.dbapi.connect(**db_kwargs):
+        pass
diff -pruN 1.10.6-3/test/dbapi/test_auth.py 1.23.0-0ubuntu1/test/dbapi/test_auth.py
--- 1.10.6-3/test/dbapi/test_auth.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_auth.py	2021-07-30 12:49:25.000000000 +0000
@@ -0,0 +1,54 @@
+import pytest
+
+import pg8000.dbapi
+
+
+# This requires a line in pg_hba.conf that requires md5 for the database
+# pg8000_md5
+
+
+def testMd5(db_kwargs):
+    db_kwargs["database"] = "pg8000_md5"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.dbapi.DatabaseError, match="3D000"):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires gss for the database
+# pg8000_gss
+
+
+def testGss(db_kwargs):
+    db_kwargs["database"] = "pg8000_gss"
+
+    # Should raise an exception saying gss isn't supported
+    with pytest.raises(
+        pg8000.dbapi.InterfaceError,
+        match="Authentication method 7 not supported by pg8000.",
+    ):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires 'password' for the
+# database pg8000_password
+
+
+def testPassword(db_kwargs):
+    db_kwargs["database"] = "pg8000_password"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.dbapi.DatabaseError, match="3D000"):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires scram-sha-256 for the
+# database scram-sha-256
+
+
+def test_scram_sha_256(db_kwargs):
+    db_kwargs["database"] = "pg8000_scram_sha_256"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.dbapi.DatabaseError, match="3D000"):
+        pg8000.dbapi.connect(**db_kwargs)
diff -pruN 1.10.6-3/test/dbapi/test_benchmarks.py 1.23.0-0ubuntu1/test/dbapi/test_benchmarks.py
--- 1.10.6-3/test/dbapi/test_benchmarks.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_benchmarks.py	2021-11-13 09:58:13.000000000 +0000
@@ -0,0 +1,27 @@
+import pytest
+
+
+@pytest.mark.parametrize(
+    "txt",
+    (
+        ("int2", "cast(id / 100 as int2)"),
+        "cast(id as int4)",
+        "cast(id * 100 as int8)",
+        "(id % 2) = 0",
+        "N'Static text string'",
+        "cast(id / 100 as float4)",
+        "cast(id / 100 as float8)",
+        "cast(id / 100 as numeric)",
+        "timestamp '2001-09-28'",
+    ),
+)
+def test_round_trips(con, benchmark, txt):
+    def torun():
+        query = f"""SELECT {txt}, {txt}, {txt}, {txt}, {txt}, {txt}, {txt}
+            FROM (SELECT generate_series(1, 10000) AS id) AS tbl"""
+        cursor = con.cursor()
+        cursor.execute(query)
+        cursor.fetchall()
+        cursor.close()
+
+    benchmark(torun)
diff -pruN 1.10.6-3/test/dbapi/test_connection.py 1.23.0-0ubuntu1/test/dbapi/test_connection.py
--- 1.10.6-3/test/dbapi/test_connection.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_connection.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,206 @@
+import datetime
+import warnings
+
+import pytest
+
+import pg8000
+
+
+def test_unix_socket_missing():
+    conn_params = {"unix_sock": "/file-does-not-exist", "user": "doesn't-matter"}
+
+    with pytest.raises(pg8000.dbapi.InterfaceError):
+        pg8000.dbapi.connect(**conn_params)
+
+
+def test_internet_socket_connection_refused():
+    conn_params = {"port": 0, "user": "doesn't-matter"}
+
+    with pytest.raises(
+        pg8000.dbapi.InterfaceError,
+        match="Can't create a connection to host localhost and port 0 "
+        "\\(timeout is None and source_address is None\\).",
+    ):
+        pg8000.dbapi.connect(**conn_params)
+
+
+def test_database_missing(db_kwargs):
+    db_kwargs["database"] = "missing-db"
+    with pytest.raises(pg8000.dbapi.DatabaseError):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+def test_database_name_unicode(db_kwargs):
+    db_kwargs["database"] = "pg8000_sn\uFF6Fw"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.dbapi.DatabaseError, match="3D000"):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+def test_database_name_bytes(db_kwargs):
+    """Should only raise an exception saying db doesn't exist"""
+
+    db_kwargs["database"] = bytes("pg8000_sn\uFF6Fw", "utf8")
+    with pytest.raises(pg8000.dbapi.DatabaseError, match="3D000"):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+def test_password_bytes(con, db_kwargs):
+    # Create user
+    username = "boltzmann"
+    password = "cha\uFF6Fs"
+    cur = con.cursor()
+    cur.execute("create user " + username + " with password '" + password + "';")
+    con.commit()
+
+    db_kwargs["user"] = username
+    db_kwargs["password"] = password.encode("utf8")
+    db_kwargs["database"] = "pg8000_md5"
+    with pytest.raises(pg8000.dbapi.DatabaseError, match="3D000"):
+        pg8000.dbapi.connect(**db_kwargs)
+
+    cur.execute("drop role " + username)
+    con.commit()
+
+
+def test_application_name(db_kwargs):
+    app_name = "my test application name"
+    db_kwargs["application_name"] = app_name
+    with pg8000.dbapi.connect(**db_kwargs) as db:
+        cur = db.cursor()
+        cur.execute(
+            "select application_name from pg_stat_activity "
+            " where pid = pg_backend_pid()"
+        )
+
+        application_name = cur.fetchone()[0]
+        assert application_name == app_name
+
+
+def test_application_name_integer(db_kwargs):
+    db_kwargs["application_name"] = 1
+    with pytest.raises(
+        pg8000.dbapi.InterfaceError,
+        match="The parameter application_name can't be of type " "<class 'int'>.",
+    ):
+        pg8000.dbapi.connect(**db_kwargs)
+
+
+def test_application_name_bytearray(db_kwargs):
+    db_kwargs["application_name"] = bytearray(b"Philby")
+    pg8000.dbapi.connect(**db_kwargs)
+
+
+def test_notify(con):
+    cursor = con.cursor()
+    cursor.execute("select pg_backend_pid()")
+    backend_pid = cursor.fetchall()[0][0]
+    assert list(con.notifications) == []
+    cursor.execute("LISTEN test")
+    cursor.execute("NOTIFY test")
+    con.commit()
+
+    cursor.execute("VALUES (1, 2), (3, 4), (5, 6)")
+    assert len(con.notifications) == 1
+    assert con.notifications[0] == (backend_pid, "test", "")
+
+
+def test_notify_with_payload(con):
+    cursor = con.cursor()
+    cursor.execute("select pg_backend_pid()")
+    backend_pid = cursor.fetchall()[0][0]
+    assert list(con.notifications) == []
+    cursor.execute("LISTEN test")
+    cursor.execute("NOTIFY test, 'Parnham'")
+    con.commit()
+
+    cursor.execute("VALUES (1, 2), (3, 4), (5, 6)")
+    assert len(con.notifications) == 1
+    assert con.notifications[0] == (backend_pid, "test", "Parnham")
+
+
+def test_broken_pipe_read(con, db_kwargs):
+    db1 = pg8000.dbapi.connect(**db_kwargs)
+    cur1 = db1.cursor()
+    cur2 = con.cursor()
+    cur1.execute("select pg_backend_pid()")
+    pid1 = cur1.fetchone()[0]
+
+    cur2.execute("select pg_terminate_backend(%s)", (pid1,))
+    with pytest.raises(pg8000.dbapi.InterfaceError, match="network error on read"):
+        cur1.execute("select 1")
+
+
+def test_broken_pipe_flush(con, db_kwargs):
+    db1 = pg8000.dbapi.connect(**db_kwargs)
+    cur1 = db1.cursor()
+    cur2 = con.cursor()
+    cur1.execute("select pg_backend_pid()")
+    pid1 = cur1.fetchone()[0]
+
+    cur2.execute("select pg_terminate_backend(%s)", (pid1,))
+    try:
+        cur1.execute("select 1")
+    except BaseException:
+        pass
+
+    # Sometimes raises and sometime doesn't
+    try:
+        db1.close()
+    except pg8000.exceptions.InterfaceError as e:
+        assert str(e) == "network error on flush"
+
+
+def test_broken_pipe_unpack(con):
+    cur = con.cursor()
+    cur.execute("select pg_backend_pid()")
+    pid1 = cur.fetchone()[0]
+
+    with pytest.raises(pg8000.dbapi.InterfaceError, match="network error"):
+        cur.execute("select pg_terminate_backend(%s)", (pid1,))
+
+
+def test_py_value_fail(con, mocker):
+    # Ensure that if types.py_value throws an exception, the original
+    # exception is raised (PG8000TestException), and the connection is
+    # still usable after the error.
+
+    class PG8000TestException(Exception):
+        pass
+
+    def raise_exception(val):
+        raise PG8000TestException("oh noes!")
+
+    mocker.patch.object(con, "py_types")
+    con.py_types = {datetime.time: raise_exception}
+
+    with pytest.raises(PG8000TestException):
+        c = con.cursor()
+        c.execute("SELECT CAST(%s AS TIME) AS f1", (datetime.time(10, 30),))
+        c.fetchall()
+
+        # ensure that the connection is still usable for a new query
+        c.execute("VALUES ('hw3'::text)")
+        assert c.fetchone()[0] == "hw3"
+
+
+def test_no_data_error_recovery(con):
+    for i in range(1, 4):
+        with pytest.raises(pg8000.DatabaseError) as e:
+            c = con.cursor()
+            c.execute("DROP TABLE t1")
+        assert e.value.args[0]["C"] == "42P01"
+        con.rollback()
+
+
+def test_closed_connection(db_kwargs):
+    warnings.simplefilter("ignore")
+
+    my_db = pg8000.connect(**db_kwargs)
+    cursor = my_db.cursor()
+    my_db.close()
+    with pytest.raises(my_db.InterfaceError, match="connection is closed"):
+        cursor.execute("VALUES ('hw1'::text)")
+
+    warnings.resetwarnings()
diff -pruN 1.10.6-3/test/dbapi/test_copy.py 1.23.0-0ubuntu1/test/dbapi/test_copy.py
--- 1.10.6-3/test/dbapi/test_copy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_copy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,89 @@
+from io import BytesIO
+
+import pytest
+
+
+@pytest.fixture
+def db_table(request, con):
+    cursor = con.cursor()
+    cursor.execute(
+        "CREATE TEMPORARY TABLE t1 (f1 int primary key, "
+        "f2 int not null, f3 varchar(50) null) on commit drop"
+    )
+    return con
+
+
+def test_copy_to_with_table(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, 1))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 2, 2))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 3, 3))
+
+    stream = BytesIO()
+    cursor.execute("copy t1 to stdout", stream=stream)
+    assert stream.getvalue() == b"1\t1\t1\n2\t2\t2\n3\t3\t3\n"
+    assert cursor.rowcount == 3
+
+
+def test_copy_to_with_query(db_table):
+    cursor = db_table.cursor()
+    stream = BytesIO()
+    cursor.execute(
+        "COPY (SELECT 1 as One, 2 as Two) TO STDOUT WITH DELIMITER "
+        "'X' CSV HEADER QUOTE AS 'Y' FORCE QUOTE Two",
+        stream=stream,
+    )
+    assert stream.getvalue() == b"oneXtwo\n1XY2Y\n"
+    assert cursor.rowcount == 1
+
+
+def test_copy_from_with_table(db_table):
+    cursor = db_table.cursor()
+    stream = BytesIO(b"1\t1\t1\n2\t2\t2\n3\t3\t3\n")
+    cursor.execute("copy t1 from STDIN", stream=stream)
+    assert cursor.rowcount == 3
+
+    cursor.execute("SELECT * FROM t1 ORDER BY f1")
+    retval = cursor.fetchall()
+    assert retval == ([1, 1, "1"], [2, 2, "2"], [3, 3, "3"])
+
+
+def test_copy_from_with_query(db_table):
+    cursor = db_table.cursor()
+    stream = BytesIO(b"f1Xf2\n1XY1Y\n")
+    cursor.execute(
+        "COPY t1 (f1, f2) FROM STDIN WITH DELIMITER 'X' CSV HEADER "
+        "QUOTE AS 'Y' FORCE NOT NULL f1",
+        stream=stream,
+    )
+    assert cursor.rowcount == 1
+
+    cursor.execute("SELECT * FROM t1 ORDER BY f1")
+    retval = cursor.fetchall()
+    assert retval == ([1, 1, None],)
+
+
+def test_copy_from_with_error(db_table):
+    cursor = db_table.cursor()
+    stream = BytesIO(b"f1Xf2\n\n1XY1Y\n")
+    with pytest.raises(BaseException) as e:
+        cursor.execute(
+            "COPY t1 (f1, f2) FROM STDIN WITH DELIMITER 'X' CSV HEADER "
+            "QUOTE AS 'Y' FORCE NOT NULL f1",
+            stream=stream,
+        )
+
+    arg = {
+        "S": ("ERROR",),
+        "C": ("22P02",),
+        "M": (
+            'invalid input syntax for type integer: ""',
+            'invalid input syntax for integer: ""',
+        ),
+        "W": ('COPY t1, line 2, column f1: ""',),
+        "F": ("numutils.c",),
+        "R": ("pg_atoi", "pg_strtoint32"),
+    }
+    earg = e.value.args[0]
+    for k, v in arg.items():
+        assert earg[k] in v
diff -pruN 1.10.6-3/test/dbapi/test_dbapi20.py 1.23.0-0ubuntu1/test/dbapi/test_dbapi20.py
--- 1.10.6-3/test/dbapi/test_dbapi20.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_dbapi20.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,664 @@
+import time
+import warnings
+
+import pytest
+
+import pg8000
+
+""" Python DB API 2.0 driver compliance unit test suite.
+
+    This software is Public Domain and may be used without restrictions.
+
+ "Now we have booze and barflies entering the discussion, plus rumours of
+  DBAs on drugs... and I won't tell you what flashes through my mind each
+  time I read the subject line with 'Anal Compliance' in it.  All around
+  this is turning out to be a thoroughly unwholesome unit test."
+
+    -- Ian Bicking
+"""
+
+__rcs_id__ = "$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $"
+__version__ = "$Revision: 1.10 $"[11:-2]
+__author__ = "Stuart Bishop <zen@shangri-la.dropbear.id.au>"
+
+
+# $Log: dbapi20.py,v $
+# Revision 1.10  2003/10/09 03:14:14  zenzen
+# Add test for DB API 2.0 optional extension, where database exceptions
+# are exposed as attributes on the Connection object.
+#
+# Revision 1.9  2003/08/13 01:16:36  zenzen
+# Minor tweak from Stefan Fleiter
+#
+# Revision 1.8  2003/04/10 00:13:25  zenzen
+# Changes, as per suggestions by M.-A. Lemburg
+# - Add a table prefix, to ensure namespace collisions can always be avoided
+#
+# Revision 1.7  2003/02/26 23:33:37  zenzen
+# Break out DDL into helper functions, as per request by David Rushby
+#
+# Revision 1.6  2003/02/21 03:04:33  zenzen
+# Stuff from Henrik Ekelund:
+#     added test_None
+#     added test_nextset & hooks
+#
+# Revision 1.5  2003/02/17 22:08:43  zenzen
+# Implement suggestions and code from Henrik Eklund - test that
+# cursor.arraysize defaults to 1 & generic cursor.callproc test added
+#
+# Revision 1.4  2003/02/15 00:16:33  zenzen
+# Changes, as per suggestions and bug reports by M.-A. Lemburg,
+# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
+# - Class renamed
+# - Now a subclass of TestCase, to avoid requiring the driver stub
+#   to use multiple inheritance
+# - Reversed the polarity of buggy test in test_description
+# - Test exception heirarchy correctly
+# - self.populate is now self._populate(), so if a driver stub
+#   overrides self.ddl1 this change propogates
+# - VARCHAR columns now have a width, which will hopefully make the
+#   DDL even more portible (this will be reversed if it causes more problems)
+# - cursor.rowcount being checked after various execute and fetchXXX methods
+# - Check for fetchall and fetchmany returning empty lists after results
+#   are exhausted (already checking for empty lists if select retrieved
+#   nothing
+# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
+#
+
+
+""" Test a database self.driver for DB API 2.0 compatibility.
+    This implementation tests Gadfly, but the TestCase
+    is structured so that other self.drivers can subclass this
+    test case to ensure compliance with the DB-API. It is
+    expected that this TestCase may be expanded in the future
+    if ambiguities or edge conditions are discovered.
+
+    The 'Optional Extensions' are not yet being tested.
+
+    self.drivers should subclass this test, overriding setUp, tearDown,
+    self.driver, connect_args and connect_kw_args. Class specification
+    should be as follows:
+
+    import dbapi20
+    class mytest(dbapi20.DatabaseAPI20Test):
+       [...]
+
+    Don't 'import DatabaseAPI20Test from dbapi20', or you will
+    confuse the unit tester - just 'import dbapi20'.
+"""
+
+# The self.driver module. This should be the module where the 'connect'
+# method is to be found
+driver = pg8000
+table_prefix = "dbapi20test_"  # If you need to specify a prefix for tables
+
+ddl1 = "create table %sbooze (name varchar(20))" % table_prefix
+ddl2 = "create table %sbarflys (name varchar(20))" % table_prefix
+xddl1 = "drop table %sbooze" % table_prefix
+xddl2 = "drop table %sbarflys" % table_prefix
+
+# Name of stored procedure to convert
+# string->lowercase
+lowerfunc = "lower"
+
+
+# Some drivers may need to override these helpers, for example adding
+# a 'commit' after the execute.
+def executeDDL1(cursor):
+    cursor.execute(ddl1)
+
+
+def executeDDL2(cursor):
+    cursor.execute(ddl2)
+
+
+@pytest.fixture
+def db(request, con):
+    def fin():
+        with con.cursor() as cur:
+            for ddl in (xddl1, xddl2):
+                try:
+                    cur.execute(ddl)
+                    con.commit()
+                except driver.Error:
+                    # Assume table didn't exist. Other tests will check if
+                    # execute is busted.
+                    pass
+
+    request.addfinalizer(fin)
+    return con
+
+
+def test_apilevel():
+    # Must exist
+    apilevel = driver.apilevel
+
+    # Must equal 2.0
+    assert apilevel == "2.0"
+
+
+def test_threadsafety():
+    try:
+        # Must exist
+        threadsafety = driver.threadsafety
+        # Must be a valid value
+        assert threadsafety in (0, 1, 2, 3)
+    except AttributeError:
+        assert False, "Driver doesn't define threadsafety"
+
+
+def test_paramstyle():
+    try:
+        # Must exist
+        paramstyle = driver.paramstyle
+        # Must be a valid value
+        assert paramstyle in ("qmark", "numeric", "named", "format", "pyformat")
+    except AttributeError:
+        assert False, "Driver doesn't define paramstyle"
+
+
+def test_Exceptions():
+    # Make sure required exceptions exist, and are in the
+    # defined hierarchy.
+    assert issubclass(driver.Warning, Exception)
+    assert issubclass(driver.Error, Exception)
+    assert issubclass(driver.InterfaceError, driver.Error)
+    assert issubclass(driver.DatabaseError, driver.Error)
+    assert issubclass(driver.OperationalError, driver.Error)
+    assert issubclass(driver.IntegrityError, driver.Error)
+    assert issubclass(driver.InternalError, driver.Error)
+    assert issubclass(driver.ProgrammingError, driver.Error)
+    assert issubclass(driver.NotSupportedError, driver.Error)
+
+
+def test_ExceptionsAsConnectionAttributes(con):
+    # OPTIONAL EXTENSION
+    # Test for the optional DB API 2.0 extension, where the exceptions
+    # are exposed as attributes on the Connection object
+    # I figure this optional extension will be implemented by any
+    # driver author who is using this test suite, so it is enabled
+    # by default.
+    warnings.simplefilter("ignore")
+    drv = driver
+    assert con.Warning is drv.Warning
+    assert con.Error is drv.Error
+    assert con.InterfaceError is drv.InterfaceError
+    assert con.DatabaseError is drv.DatabaseError
+    assert con.OperationalError is drv.OperationalError
+    assert con.IntegrityError is drv.IntegrityError
+    assert con.InternalError is drv.InternalError
+    assert con.ProgrammingError is drv.ProgrammingError
+    assert con.NotSupportedError is drv.NotSupportedError
+    warnings.resetwarnings()
+
+
+def test_commit(con):
+    # Commit must work, even if it doesn't do anything
+    con.commit()
+
+
+def test_rollback(con):
+    # If rollback is defined, it should either work or throw
+    # the documented exception
+    if hasattr(con, "rollback"):
+        try:
+            con.rollback()
+        except driver.NotSupportedError:
+            pass
+
+
+def test_cursor(con):
+    con.cursor()
+
+
+def test_cursor_isolation(con):
+    # Make sure cursors created from the same connection have
+    # the documented transaction isolation level
+    cur1 = con.cursor()
+    cur2 = con.cursor()
+    executeDDL1(cur1)
+    cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    cur2.execute("select name from %sbooze" % table_prefix)
+    booze = cur2.fetchall()
+    assert len(booze) == 1
+    assert len(booze[0]) == 1
+    assert booze[0][0] == "Victoria Bitter"
+
+
+def test_description(con):
+    cur = con.cursor()
+    executeDDL1(cur)
+    assert cur.description is None, (
+        "cursor.description should be none after executing a "
+        "statement that can return no rows (such as DDL)"
+    )
+    cur.execute("select name from %sbooze" % table_prefix)
+    assert len(cur.description) == 1, "cursor.description describes too many columns"
+    assert (
+        len(cur.description[0]) == 7
+    ), "cursor.description[x] tuples must have 7 elements"
+    assert (
+        cur.description[0][0].lower() == "name"
+    ), "cursor.description[x][0] must return column name"
+    assert cur.description[0][1] == driver.STRING, (
+        "cursor.description[x][1] must return column type. Got %r"
+        % cur.description[0][1]
+    )
+
+    # Make sure self.description gets reset
+    executeDDL2(cur)
+    assert cur.description is None, (
+        "cursor.description not being set to None when executing "
+        "no-result statements (eg. DDL)"
+    )
+
+
+def test_rowcount(cursor):
+    executeDDL1(cursor)
+    assert cursor.rowcount == -1, (
+        "cursor.rowcount should be -1 after executing no-result " "statements"
+    )
+    cursor.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    assert cursor.rowcount in (-1, 1), (
+        "cursor.rowcount should == number or rows inserted, or "
+        "set to -1 after executing an insert statement"
+    )
+    cursor.execute("select name from %sbooze" % table_prefix)
+    assert cursor.rowcount in (-1, 1), (
+        "cursor.rowcount should == number of rows returned, or "
+        "set to -1 after executing a select statement"
+    )
+    executeDDL2(cursor)
+    assert cursor.rowcount == -1, (
+        "cursor.rowcount not being reset to -1 after executing " "no-result statements"
+    )
+
+
+def test_close(con):
+    cur = con.cursor()
+    con.close()
+
+    # cursor.execute should raise an Error if called after connection
+    # closed
+    with pytest.raises(driver.Error):
+        executeDDL1(cur)
+
+    # connection.commit should raise an Error if called after connection'
+    # closed.'
+    with pytest.raises(driver.Error):
+        con.commit()
+
+    # connection.close should raise an Error if called more than once
+    with pytest.raises(driver.Error):
+        con.close()
+
+
+def test_execute(con):
+    cur = con.cursor()
+    _paraminsert(cur)
+
+
+def _paraminsert(cur):
+    executeDDL1(cur)
+    cur.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    assert cur.rowcount in (-1, 1)
+
+    if driver.paramstyle == "qmark":
+        cur.execute("insert into %sbooze values (?)" % table_prefix, ("Cooper's",))
+    elif driver.paramstyle == "numeric":
+        cur.execute("insert into %sbooze values (:1)" % table_prefix, ("Cooper's",))
+    elif driver.paramstyle == "named":
+        cur.execute(
+            "insert into %sbooze values (:beer)" % table_prefix, {"beer": "Cooper's"}
+        )
+    elif driver.paramstyle == "format":
+        cur.execute("insert into %sbooze values (%%s)" % table_prefix, ("Cooper's",))
+    elif driver.paramstyle == "pyformat":
+        cur.execute(
+            "insert into %sbooze values (%%(beer)s)" % table_prefix,
+            {"beer": "Cooper's"},
+        )
+    else:
+        assert False, "Invalid paramstyle"
+
+    assert cur.rowcount in (-1, 1)
+
+    cur.execute("select name from %sbooze" % table_prefix)
+    res = cur.fetchall()
+    assert len(res) == 2, "cursor.fetchall returned too few rows"
+    beers = [res[0][0], res[1][0]]
+    beers.sort()
+    assert beers[0] == "Cooper's", (
+        "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly"
+    )
+    assert beers[1] == "Victoria Bitter", (
+        "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly"
+    )
+
+
+def test_executemany(cursor):
+    executeDDL1(cursor)
+    largs = [("Cooper's",), ("Boag's",)]
+    margs = [{"beer": "Cooper's"}, {"beer": "Boag's"}]
+    if driver.paramstyle == "qmark":
+        cursor.executemany("insert into %sbooze values (?)" % table_prefix, largs)
+    elif driver.paramstyle == "numeric":
+        cursor.executemany("insert into %sbooze values (:1)" % table_prefix, largs)
+    elif driver.paramstyle == "named":
+        cursor.executemany("insert into %sbooze values (:beer)" % table_prefix, margs)
+    elif driver.paramstyle == "format":
+        cursor.executemany("insert into %sbooze values (%%s)" % table_prefix, largs)
+    elif driver.paramstyle == "pyformat":
+        cursor.executemany(
+            "insert into %sbooze values (%%(beer)s)" % (table_prefix), margs
+        )
+    else:
+        assert False, "Unknown paramstyle"
+
+    assert cursor.rowcount in (-1, 2), (
+        "insert using cursor.executemany set cursor.rowcount to "
+        "incorrect value %r" % cursor.rowcount
+    )
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    res = cursor.fetchall()
+    assert len(res) == 2, "cursor.fetchall retrieved incorrect number of rows"
+    beers = [res[0][0], res[1][0]]
+    beers.sort()
+    assert beers[0] == "Boag's", "incorrect data retrieved"
+    assert beers[1] == "Cooper's", "incorrect data retrieved"
+
+
+def test_fetchone(cursor):
+    # cursor.fetchone should raise an Error if called before
+    # executing a select-type query
+    with pytest.raises(driver.Error):
+        cursor.fetchone()
+
+    # cursor.fetchone should raise an Error if called after
+    # executing a query that cannot return rows
+    executeDDL1(cursor)
+    with pytest.raises(driver.Error):
+        cursor.fetchone()
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    assert cursor.fetchone() is None, (
+        "cursor.fetchone should return None if a query retrieves " "no rows"
+    )
+    assert cursor.rowcount in (-1, 0)
+
+    # cursor.fetchone should raise an Error if called after
+    # executing a query that cannot return rows
+    cursor.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    with pytest.raises(driver.Error):
+        cursor.fetchone()
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchone()
+    assert len(r) == 1, "cursor.fetchone should have retrieved a single row"
+    assert r[0] == "Victoria Bitter", "cursor.fetchone retrieved incorrect data"
+    assert (
+        cursor.fetchone() is None
+    ), "cursor.fetchone should return None if no more rows available"
+    assert cursor.rowcount in (-1, 1)
+
+
+samples = [
+    "Carlton Cold",
+    "Carlton Draft",
+    "Mountain Goat",
+    "Redback",
+    "Victoria Bitter",
+    "XXXX",
+]
+
+
+def _populate():
+    """Return a list of sql commands to setup the DB for the fetch
+    tests.
+    """
+    populate = [
+        "insert into %sbooze values ('%s')" % (table_prefix, s) for s in samples
+    ]
+    return populate
+
+
+def test_fetchmany(cursor):
+    # cursor.fetchmany should raise an Error if called without
+    # issuing a query
+    with pytest.raises(driver.Error):
+        cursor.fetchmany(4)
+
+    executeDDL1(cursor)
+    for sql in _populate():
+        cursor.execute(sql)
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchmany()
+    assert len(r) == 1, (
+        "cursor.fetchmany retrieved incorrect number of rows, "
+        "default of arraysize is one."
+    )
+    cursor.arraysize = 10
+    r = cursor.fetchmany(3)  # Should get 3 rows
+    assert len(r) == 3, "cursor.fetchmany retrieved incorrect number of rows"
+    r = cursor.fetchmany(4)  # Should get 2 more
+    assert len(r) == 2, "cursor.fetchmany retrieved incorrect number of rows"
+    r = cursor.fetchmany(4)  # Should be an empty sequence
+    assert len(r) == 0, (
+        "cursor.fetchmany should return an empty sequence after "
+        "results are exhausted"
+    )
+    assert cursor.rowcount in (-1, 6)
+
+    # Same as above, using cursor.arraysize
+    cursor.arraysize = 4
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchmany()  # Should get 4 rows
+    assert len(r) == 4, "cursor.arraysize not being honoured by fetchmany"
+    r = cursor.fetchmany()  # Should get 2 more
+    assert len(r) == 2
+    r = cursor.fetchmany()  # Should be an empty sequence
+    assert len(r) == 0
+    assert cursor.rowcount in (-1, 6)
+
+    cursor.arraysize = 6
+    cursor.execute("select name from %sbooze" % table_prefix)
+    rows = cursor.fetchmany()  # Should get all rows
+    assert cursor.rowcount in (-1, 6)
+    assert len(rows) == 6
+    assert len(rows) == 6
+    rows = [row[0] for row in rows]
+    rows.sort()
+
+    # Make sure we get the right data back out
+    for i in range(0, 6):
+        assert rows[i] == samples[i], "incorrect data retrieved by cursor.fetchmany"
+
+    rows = cursor.fetchmany()  # Should return an empty list
+    assert len(rows) == 0, (
+        "cursor.fetchmany should return an empty sequence if "
+        "called after the whole result set has been fetched"
+    )
+    assert cursor.rowcount in (-1, 6)
+
+    executeDDL2(cursor)
+    cursor.execute("select name from %sbarflys" % table_prefix)
+    r = cursor.fetchmany()  # Should get empty sequence
+    assert len(r) == 0, (
+        "cursor.fetchmany should return an empty sequence if " "query retrieved no rows"
+    )
+    assert cursor.rowcount in (-1, 0)
+
+
+def test_fetchall(cursor):
+    # cursor.fetchall should raise an Error if called
+    # without executing a query that may return rows (such
+    # as a select)
+    with pytest.raises(driver.Error):
+        cursor.fetchall()
+
+    executeDDL1(cursor)
+    for sql in _populate():
+        cursor.execute(sql)
+
+    # cursor.fetchall should raise an Error if called
+    # after executing a statement that cannot return rows
+    with pytest.raises(driver.Error):
+        cursor.fetchall()
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    rows = cursor.fetchall()
+    assert cursor.rowcount in (-1, len(samples))
+    assert len(rows) == len(samples), "cursor.fetchall did not retrieve all rows"
+    rows = [r[0] for r in rows]
+    rows.sort()
+    for i in range(0, len(samples)):
+        assert rows[i] == samples[i], "cursor.fetchall retrieved incorrect rows"
+    rows = cursor.fetchall()
+    assert len(rows) == 0, (
+        "cursor.fetchall should return an empty list if called "
+        "after the whole result set has been fetched"
+    )
+    assert cursor.rowcount in (-1, len(samples))
+
+    executeDDL2(cursor)
+    cursor.execute("select name from %sbarflys" % table_prefix)
+    rows = cursor.fetchall()
+    assert cursor.rowcount in (-1, 0)
+    assert len(rows) == 0, (
+        "cursor.fetchall should return an empty list if "
+        "a select query returns no rows"
+    )
+
+
+def test_mixedfetch(cursor):
+    executeDDL1(cursor)
+    for sql in _populate():
+        cursor.execute(sql)
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    rows1 = cursor.fetchone()
+    rows23 = cursor.fetchmany(2)
+    rows4 = cursor.fetchone()
+    rows56 = cursor.fetchall()
+    assert cursor.rowcount in (-1, 6)
+    assert len(rows23) == 2, "fetchmany returned incorrect number of rows"
+    assert len(rows56) == 2, "fetchall returned incorrect number of rows"
+
+    rows = [rows1[0]]
+    rows.extend([rows23[0][0], rows23[1][0]])
+    rows.append(rows4[0])
+    rows.extend([rows56[0][0], rows56[1][0]])
+    rows.sort()
+    for i in range(0, len(samples)):
+        assert rows[i] == samples[i], "incorrect data retrieved or inserted"
+
+
+def help_nextset_setUp(cur):
+    """Should create a procedure called deleteme
+    that returns two result sets, first the
+    number of rows in booze then "name from booze"
+    """
+    raise NotImplementedError("Helper not implemented")
+
+
+def help_nextset_tearDown(cur):
+    "If cleaning up is needed after nextSetTest"
+    raise NotImplementedError("Helper not implemented")
+
+
+def test_nextset(cursor):
+    if not hasattr(cursor, "nextset"):
+        return
+
+    try:
+        executeDDL1(cursor)
+        sql = _populate()
+        for sql in _populate():
+            cursor.execute(sql)
+
+        help_nextset_setUp(cursor)
+
+        cursor.callproc("deleteme")
+        numberofrows = cursor.fetchone()
+        assert numberofrows[0] == len(samples)
+        assert cursor.nextset()
+        names = cursor.fetchall()
+        assert len(names) == len(samples)
+        s = cursor.nextset()
+        assert s is None, "No more return sets, should return None"
+    finally:
+        help_nextset_tearDown(cursor)
+
+
+def test_arraysize(cursor):
+    # Not much here - rest of the tests for this are in test_fetchmany
+    assert hasattr(cursor, "arraysize"), "cursor.arraysize must be defined"
+
+
+def test_setinputsizes(cursor):
+    cursor.setinputsizes(25)
+
+
+def test_setoutputsize_basic(cursor):
+    # Basic test is to make sure setoutputsize doesn't blow up
+    cursor.setoutputsize(1000)
+    cursor.setoutputsize(2000, 0)
+    _paraminsert(cursor)  # Make sure the cursor still works
+
+
+def test_None(cursor):
+    executeDDL1(cursor)
+    cursor.execute("insert into %sbooze values (NULL)" % table_prefix)
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchall()
+    assert len(r) == 1
+    assert len(r[0]) == 1
+    assert r[0][0] is None, "NULL value not returned as None"
+
+
+def test_Date():
+    driver.Date(2002, 12, 25)
+    driver.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0)))
+    # Can we assume this? API doesn't specify, but it seems implied
+    # self.assertEqual(str(d1),str(d2))
+
+
+def test_Time():
+    driver.Time(13, 45, 30)
+    driver.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0)))
+    # Can we assume this? API doesn't specify, but it seems implied
+    # self.assertEqual(str(t1),str(t2))
+
+
+def test_Timestamp():
+    driver.Timestamp(2002, 12, 25, 13, 45, 30)
+    driver.TimestampFromTicks(time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0)))
+    # Can we assume this? API doesn't specify, but it seems implied
+    # self.assertEqual(str(t1),str(t2))
+
+
+def test_Binary():
+    driver.Binary(b"Something")
+    driver.Binary(b"")
+
+
+def test_STRING():
+    assert hasattr(driver, "STRING"), "module.STRING must be defined"
+
+
+def test_BINARY():
+    assert hasattr(driver, "BINARY"), "module.BINARY must be defined."
+
+
+def test_NUMBER():
+    assert hasattr(driver, "NUMBER"), "module.NUMBER must be defined."
+
+
+def test_DATETIME():
+    assert hasattr(driver, "DATETIME"), "module.DATETIME must be defined."
+
+
+def test_ROWID():
+    assert hasattr(driver, "ROWID"), "module.ROWID must be defined."
diff -pruN 1.10.6-3/test/dbapi/test_dbapi.py 1.23.0-0ubuntu1/test/dbapi/test_dbapi.py
--- 1.10.6-3/test/dbapi/test_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,206 @@
+import datetime
+import os
+import time
+
+import pytest
+
+import pg8000
+
+
+@pytest.fixture
+def has_tzset():
+
+    # Neither Windows nor Jython 2.5.3 has a time.tzset() so skip
+    if hasattr(time, "tzset"):
+        os.environ["TZ"] = "UTC"
+        time.tzset()
+        return True
+    return False
+
+
+# DBAPI compatible interface tests
+@pytest.fixture
+def db_table(con, has_tzset):
+    c = con.cursor()
+    c.execute(
+        "CREATE TEMPORARY TABLE t1 "
+        "(f1 int primary key, f2 int not null, f3 varchar(50) null) "
+        "ON COMMIT DROP"
+    )
+    c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
+    c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
+    c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None))
+    c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None))
+    c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None))
+    return con
+
+
+def test_parallel_queries(db_table):
+    c1 = db_table.cursor()
+    c2 = db_table.cursor()
+
+    c1.execute("SELECT f1, f2, f3 FROM t1")
+    while 1:
+        row = c1.fetchone()
+        if row is None:
+            break
+        f1, f2, f3 = row
+        c2.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
+        while 1:
+            row = c2.fetchone()
+            if row is None:
+                break
+            f1, f2, f3 = row
+
+
+def test_qmark(mocker, db_table):
+    mocker.patch("pg8000.dbapi.paramstyle", "qmark")
+    c1 = db_table.cursor()
+    c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > ?", (3,))
+    while 1:
+        row = c1.fetchone()
+        if row is None:
+            break
+        f1, f2, f3 = row
+
+
+def test_numeric(mocker, db_table):
+    mocker.patch("pg8000.dbapi.paramstyle", "numeric")
+    c1 = db_table.cursor()
+    c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > :1", (3,))
+    while 1:
+        row = c1.fetchone()
+        if row is None:
+            break
+        f1, f2, f3 = row
+
+
+def test_named(mocker, db_table):
+    mocker.patch("pg8000.dbapi.paramstyle", "named")
+    c1 = db_table.cursor()
+    c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > :f1", {"f1": 3})
+    while 1:
+        row = c1.fetchone()
+        if row is None:
+            break
+        f1, f2, f3 = row
+
+
+def test_format(mocker, db_table):
+    mocker.patch("pg8000.dbapi.paramstyle", "format")
+    c1 = db_table.cursor()
+    c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (3,))
+    while 1:
+        row = c1.fetchone()
+        if row is None:
+            break
+        f1, f2, f3 = row
+
+
+def test_pyformat(mocker, db_table):
+    mocker.patch("pg8000.dbapi.paramstyle", "pyformat")
+    c1 = db_table.cursor()
+    c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %(f1)s", {"f1": 3})
+    while 1:
+        row = c1.fetchone()
+        if row is None:
+            break
+        f1, f2, f3 = row
+
+
+def test_arraysize(db_table):
+    c1 = db_table.cursor()
+    c1.arraysize = 3
+    c1.execute("SELECT * FROM t1")
+    retval = c1.fetchmany()
+    assert len(retval) == c1.arraysize
+
+
+def test_date():
+    val = pg8000.Date(2001, 2, 3)
+    assert val == datetime.date(2001, 2, 3)
+
+
+def test_time():
+    val = pg8000.Time(4, 5, 6)
+    assert val == datetime.time(4, 5, 6)
+
+
+def test_timestamp():
+    val = pg8000.Timestamp(2001, 2, 3, 4, 5, 6)
+    assert val == datetime.datetime(2001, 2, 3, 4, 5, 6)
+
+
+def test_date_from_ticks(has_tzset):
+    if has_tzset:
+        val = pg8000.DateFromTicks(1173804319)
+        assert val == datetime.date(2007, 3, 13)
+
+
+def testTimeFromTicks(has_tzset):
+    if has_tzset:
+        val = pg8000.TimeFromTicks(1173804319)
+        assert val == datetime.time(16, 45, 19)
+
+
+def test_timestamp_from_ticks(has_tzset):
+    if has_tzset:
+        val = pg8000.TimestampFromTicks(1173804319)
+        assert val == datetime.datetime(2007, 3, 13, 16, 45, 19)
+
+
+def test_binary():
+    v = pg8000.Binary(b"\x00\x01\x02\x03\x02\x01\x00")
+    assert v == b"\x00\x01\x02\x03\x02\x01\x00"
+    assert isinstance(v, pg8000.BINARY)
+
+
+def test_row_count(db_table):
+    c1 = db_table.cursor()
+    c1.execute("SELECT * FROM t1")
+
+    assert 5 == c1.rowcount
+
+    c1.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
+    assert 2 == c1.rowcount
+
+    c1.execute("DELETE FROM t1")
+    assert 5 == c1.rowcount
+
+
+def test_fetch_many(db_table):
+    cursor = db_table.cursor()
+    cursor.arraysize = 2
+    cursor.execute("SELECT * FROM t1")
+    assert 2 == len(cursor.fetchmany())
+    assert 2 == len(cursor.fetchmany())
+    assert 1 == len(cursor.fetchmany())
+    assert 0 == len(cursor.fetchmany())
+
+
+def test_iterator(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("SELECT * FROM t1 ORDER BY f1")
+    f1 = 0
+    for row in cursor.fetchall():
+        next_f1 = row[0]
+        assert next_f1 > f1
+        f1 = next_f1
+
+
+# Vacuum can't be run inside a transaction, so we need to turn
+# autocommit on.
+def test_vacuum(con):
+    con.autocommit = True
+    cursor = con.cursor()
+    cursor.execute("vacuum")
+
+
+def test_prepared_statement(con):
+    cursor = con.cursor()
+    cursor.execute("PREPARE gen_series AS SELECT generate_series(1, 10);")
+    cursor.execute("EXECUTE gen_series")
+
+
+def test_cursor_type(cursor):
+    assert str(type(cursor)) == "<class 'pg8000.dbapi.Cursor'>"
diff -pruN 1.10.6-3/test/dbapi/test_paramstyle.py 1.23.0-0ubuntu1/test/dbapi/test_paramstyle.py
--- 1.10.6-3/test/dbapi/test_paramstyle.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_paramstyle.py	2021-11-13 10:04:09.000000000 +0000
@@ -0,0 +1,124 @@
+import pytest
+
+from pg8000.dbapi import convert_paramstyle
+
+#        "(id %% 2) = 0",
+
+
+@pytest.mark.parametrize(
+    "query,statement",
+    [
+        [
+            'SELECT ?, ?, "field_?" FROM t '
+            "WHERE a='say ''what?''' AND b=? AND c=E'?\\'test\\'?'",
+            'SELECT $1, $2, "field_?" FROM t WHERE '
+            "a='say ''what?''' AND b=$3 AND c=E'?\\'test\\'?'",
+        ],
+        [
+            "SELECT ?, ?, * FROM t WHERE a=? AND b='are you ''sure?'",
+            "SELECT $1, $2, * FROM t WHERE a=$3 AND b='are you ''sure?'",
+        ],
+    ],
+)
+def test_qmark(query, statement):
+    args = 1, 2, 3
+    new_query, vals = convert_paramstyle("qmark", query, args)
+    assert (new_query, vals) == (statement, args)
+
+
+@pytest.mark.parametrize(
+    "query,expected",
+    [
+        [
+            "SELECT sum(x)::decimal(5, 2) :2, :1, * FROM t WHERE a=:3",
+            "SELECT sum(x)::decimal(5, 2) $2, $1, * FROM t WHERE a=$3",
+        ],
+    ],
+)
+def test_numeric(query, expected):
+    args = 1, 2, 3
+    new_query, vals = convert_paramstyle("numeric", query, args)
+    assert (new_query, vals) == (expected, args)
+
+
+@pytest.mark.parametrize(
+    "query",
+    [
+        "make_interval(days := 10)",
+    ],
+)
+def test_numeric_unchanged(query):
+    args = 1, 2, 3
+    new_query, vals = convert_paramstyle("numeric", query, args)
+    assert (new_query, vals) == (query, args)
+
+
+def test_named():
+    args = {
+        "f_2": 1,
+        "f1": 2,
+    }
+    new_query, vals = convert_paramstyle(
+        "named", "SELECT sum(x)::decimal(5, 2) :f_2, :f1 FROM t WHERE a=:f_2", args
+    )
+    expected = "SELECT sum(x)::decimal(5, 2) $1, $2 FROM t WHERE a=$1"
+    assert (new_query, vals) == (expected, (1, 2))
+
+
+@pytest.mark.parametrize(
+    "query,expected",
+    [
+        [
+            "SELECT %s, %s, \"f1_%%\", E'txt_%%' "
+            "FROM t WHERE a=%s AND b='75%%' AND c = '%' -- Comment with %",
+            "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$3 AND "
+            "b='75%%' AND c = '%' -- Comment with %",
+        ],
+        [
+            "SELECT -- Comment\n%s FROM t",
+            "SELECT -- Comment\n$1 FROM t",
+        ],
+    ],
+)
+def test_format_changed(query, expected):
+    args = 1, 2, 3
+    new_query, vals = convert_paramstyle("format", query, args)
+    assert (new_query, vals) == (expected, args)
+
+
+@pytest.mark.parametrize(
+    "query",
+    [
+        r"""COMMENT ON TABLE test_schema.comment_test """
+        r"""IS 'the test % '' " \ table comment'""",
+    ],
+)
+def test_format_unchanged(query):
+    args = 1, 2, 3
+    new_query, vals = convert_paramstyle("format", query, args)
+    assert (new_query, vals) == (query, args)
+
+
+def test_py_format():
+    args = {"f2": 1, "f1": 2, "f3": 3}
+
+    new_query, vals = convert_paramstyle(
+        "pyformat",
+        "SELECT %(f2)s, %(f1)s, \"f1_%%\", E'txt_%%' "
+        "FROM t WHERE a=%(f2)s AND b='75%%'",
+        args,
+    )
+    expected = "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$1 AND " "b='75%%'"
+    assert (new_query, vals) == (expected, (1, 2))
+
+
+def test_pyformat_format():
+    """pyformat should support %s and an array, too:"""
+    args = 1, 2, 3
+    new_query, vals = convert_paramstyle(
+        "pyformat",
+        "SELECT %s, %s, \"f1_%%\", E'txt_%%' " "FROM t WHERE a=%s AND b='75%%'",
+        args,
+    )
+    expected = "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$3 AND " "b='75%%'"
+    assert (new_query, vals) == (expected, args)
diff -pruN 1.10.6-3/test/dbapi/test_query.py 1.23.0-0ubuntu1/test/dbapi/test_query.py
--- 1.10.6-3/test/dbapi/test_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_query.py	2021-11-12 20:27:00.000000000 +0000
@@ -0,0 +1,325 @@
+from datetime import datetime as Datetime, timezone as Timezone
+
+import pytest
+
+import pg8000.dbapi
+from pg8000.converters import INET_ARRAY, INTEGER
+
+
+# Tests relating to the basic operation of the database driver, driven by the
+# pg8000 custom interface.
+
+
+@pytest.fixture
+def db_table(request, con):
+    con.paramstyle = "format"
+    cursor = con.cursor()
+    cursor.execute(
+        "CREATE TEMPORARY TABLE t1 (f1 int primary key, "
+        "f2 bigint not null, f3 varchar(50) null) "
+    )
+
+    def fin():
+        try:
+            cursor = con.cursor()
+            cursor.execute("drop table t1")
+        except pg8000.dbapi.DatabaseError:
+            pass
+
+    request.addfinalizer(fin)
+    return con
+
+
+def test_database_error(cursor):
+    with pytest.raises(pg8000.dbapi.DatabaseError):
+        cursor.execute("INSERT INTO t99 VALUES (1, 2, 3)")
+
+
+def test_parallel_queries(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None))
+    c1 = db_table.cursor()
+    c2 = db_table.cursor()
+    c1.execute("SELECT f1, f2, f3 FROM t1")
+    for row in c1.fetchall():
+        f1, f2, f3 = row
+        c2.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
+        for row in c2.fetchall():
+            f1, f2, f3 = row
+
+
+def test_parallel_open_portals(con):
+    c1 = con.cursor()
+    c2 = con.cursor()
+    c1count, c2count = 0, 0
+    q = "select * from generate_series(1, %s)"
+    params = (100,)
+    c1.execute(q, params)
+    c2.execute(q, params)
+    for c2row in c2.fetchall():
+        c2count += 1
+    for c1row in c1.fetchall():
+        c1count += 1
+
+    assert c1count == c2count
+
+
+# Run a query on a table, alter the structure of the table, then run the
+# original query again.
+
+
+def test_alter(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("select * from t1")
+    cursor.execute("alter table t1 drop column f3")
+    cursor.execute("select * from t1")
+
+
+# Run a query on a table, drop then re-create the table, then run the
+# original query again.
+
+
+def test_create(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("select * from t1")
+    cursor.execute("drop table t1")
+    cursor.execute("create temporary table t1 (f1 int primary key)")
+    cursor.execute("select * from t1")
+
+
+def test_insert_returning(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("CREATE TEMPORARY TABLE t2 (id serial, data text)")
+
+    # Test INSERT ... RETURNING with one row...
+    cursor.execute("INSERT INTO t2 (data) VALUES (%s) RETURNING id", ("test1",))
+    row_id = cursor.fetchone()[0]
+    cursor.execute("SELECT data FROM t2 WHERE id = %s", (row_id,))
+    assert "test1" == cursor.fetchone()[0]
+
+    assert cursor.rowcount == 1
+
+    # Test with multiple rows...
+    cursor.execute(
+        "INSERT INTO t2 (data) VALUES (%s), (%s), (%s) " "RETURNING id",
+        ("test2", "test3", "test4"),
+    )
+    assert cursor.rowcount == 3
+    ids = tuple([x[0] for x in cursor.fetchall()])
+    assert len(ids) == 3
+
+
+def test_row_count(db_table):
+    cursor = db_table.cursor()
+    expected_count = 57
+    cursor.executemany(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
+        tuple((i, i, None) for i in range(expected_count)),
+    )
+
+    # Check rowcount after executemany
+    assert expected_count == cursor.rowcount
+
+    cursor.execute("SELECT * FROM t1")
+
+    # Check row_count without doing any reading first...
+    assert expected_count == cursor.rowcount
+
+    # Check rowcount after reading some rows, make sure it still
+    # works...
+    for i in range(expected_count // 2):
+        cursor.fetchone()
+    assert expected_count == cursor.rowcount
+
+    cursor = db_table.cursor()
+    # Restart the cursor, read a few rows, and then check rowcount
+    # again...
+    cursor.execute("SELECT * FROM t1")
+    for i in range(expected_count // 3):
+        cursor.fetchone()
+    assert expected_count == cursor.rowcount
+
+    # Should be -1 for a command with no results
+    cursor.execute("DROP TABLE t1")
+    assert -1 == cursor.rowcount
+
+
+def test_row_count_update(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None))
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None))
+    cursor.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
+    assert cursor.rowcount == 2
+
+
+def test_int_oid(cursor):
+    # https://bugs.launchpad.net/pg8000/+bug/230796
+    cursor.execute("SELECT typname FROM pg_type WHERE oid = %s", (100,))
+
+
+def test_unicode_query(cursor):
+    cursor.execute(
+        "CREATE TEMPORARY TABLE \u043c\u0435\u0441\u0442\u043e "
+        "(\u0438\u043c\u044f VARCHAR(50), "
+        "\u0430\u0434\u0440\u0435\u0441 VARCHAR(250))"
+    )
+
+
+def test_executemany(db_table):
+    cursor = db_table.cursor()
+    cursor.executemany(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
+        ((1, 1, "Avast ye!"), (2, 1, None)),
+    )
+
+    cursor.executemany(
+        "select CAST(%s AS TIMESTAMP)",
+        ((Datetime(2014, 5, 7, tzinfo=Timezone.utc),), (Datetime(2014, 5, 7),)),
+    )
+
+
+def test_executemany_setinputsizes(cursor):
+    """Make sure that setinputsizes works for all the parameter sets"""
+
+    cursor.execute(
+        "CREATE TEMPORARY TABLE t1 (f1 int primary key, f2 inet[] not null) "
+    )
+
+    cursor.setinputsizes(INTEGER, INET_ARRAY)
+    cursor.executemany(
+        "INSERT INTO t1 (f1, f2) VALUES (%s, %s)", ((1, ["1.1.1.1"]), (2, ["0.0.0.0"]))
+    )
+
+
+def test_executemany_no_param_sets(cursor):
+    cursor.executemany("INSERT INTO t1 (f1, f2) VALUES (%s, %s)", [])
+    assert cursor.rowcount == -1
+
+
+# Check that autocommit stays off
+# We keep track of whether we're in a transaction or not by using the
+# READY_FOR_QUERY message.
+def test_transactions(db_table):
+    cursor = db_table.cursor()
+    cursor.execute("commit")
+    cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, "Zombie"))
+    cursor.execute("rollback")
+    cursor.execute("select * from t1")
+
+    assert cursor.rowcount == 0
+
+
+def test_in(cursor):
+    cursor.execute("SELECT typname FROM pg_type WHERE oid = any(%s)", ([16, 23],))
+    ret = cursor.fetchall()
+    assert ret[0][0] == "bool"
+
+
+def test_no_previous_tpc(con):
+    con.tpc_begin("Stacey")
+    cursor = con.cursor()
+    cursor.execute("SELECT * FROM pg_type")
+    con.tpc_commit()
+
+
+# Check that tpc_recover() doesn't start a transaction
+def test_tpc_recover(con):
+    con.tpc_recover()
+    cursor = con.cursor()
+    con.autocommit = True
+
+    # If tpc_recover() has started a transaction, this will fail
+    cursor.execute("VACUUM")
+
+
+def test_tpc_prepare(con):
+    xid = "Stacey"
+    con.tpc_begin(xid)
+    con.tpc_prepare()
+    con.tpc_rollback(xid)
+
+
+# An empty query should raise a ProgrammingError
+def test_empty_query(cursor):
+    with pytest.raises(pg8000.dbapi.DatabaseError):
+        cursor.execute("")
+
+
+# rolling back when not in a transaction doesn't generate a warning
+def test_rollback_no_transaction(con):
+    # Remove any existing notices
+    con.notices.clear()
+
+    # First, verify that a raw rollback does produce a notice
+    con.execute_unnamed("rollback")
+
+    assert 1 == len(con.notices)
+
+    # 25P01 is the code for no_active_sql_transaction. It has
+    # a message and severity name, but those might be
+    # localized/depend on the server version.
+    assert con.notices.pop().get(b"C") == b"25P01"
+
+    # Now going through the rollback method doesn't produce
+    # any notices because it knows we're not in a transaction.
+    con.rollback()
+
+    assert 0 == len(con.notices)
+
+
+def test_setinputsizes(con):
+    cursor = con.cursor()
+    cursor.setinputsizes(20)
+    cursor.execute("select %s", (None,))
+    retval = cursor.fetchall()
+    assert retval[0][0] is None
+
+
+def test_unexecuted_cursor_rowcount(con):
+    cursor = con.cursor()
+    assert cursor.rowcount == -1
+
+
+def test_unexecuted_cursor_description(con):
+    cursor = con.cursor()
+    assert cursor.description is None
+
+
+def test_callproc(cursor):
+    cursor.execute("select current_setting('server_version')")
+    version = cursor.fetchall()[0][0].split()[0]
+
+    if not (version.startswith("9") or version.startswith("10")):
+        cursor.execute(
+            """
+CREATE PROCEDURE echo(INOUT val text)
+  LANGUAGE plpgsql AS
+$proc$
+BEGIN
+END
+$proc$;
+"""
+        )
+
+        cursor.callproc("echo", ["hello"])
+        assert cursor.fetchall() == (["hello"],)
+
+
+def test_null_result(db_table):
+    cur = db_table.cursor()
+    cur.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, "a"))
+    with pytest.raises(pg8000.dbapi.ProgrammingError):
+        cur.fetchall()
+
+
+def test_not_parsed_if_no_params(mocker, cursor):
+    mock_convert_paramstyle = mocker.patch("pg8000.dbapi.convert_paramstyle")
+    cursor.execute("ROLLBACK")
+    mock_convert_paramstyle.assert_not_called()
diff -pruN 1.10.6-3/test/dbapi/test_typeconversion.py 1.23.0-0ubuntu1/test/dbapi/test_typeconversion.py
--- 1.10.6-3/test/dbapi/test_typeconversion.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/dbapi/test_typeconversion.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,779 @@
+import decimal
+import ipaddress
+import os
+import time
+import uuid
+from collections import OrderedDict
+from datetime import (
+    date as Date,
+    datetime as Datetime,
+    time as Time,
+    timedelta as Timedelta,
+    timezone as Timezone,
+)
+from enum import Enum
+from json import dumps
+
+import pytest
+
+import pytz
+
+import pg8000.dbapi
+from pg8000.converters import (
+    INTERVAL,
+    PGInterval,
+    interval_in,
+    pg_interval_in,
+    pg_interval_out,
+)
+
+
+# Type conversion tests
+
+
+def test_time_roundtrip(cursor):
+    t = Time(4, 5, 6)
+    cursor.execute("SELECT cast(%s as time) as f1", (t,))
+    assert cursor.fetchall()[0][0] == t
+
+
+def test_date_roundtrip(cursor):
+    v = Date(2001, 2, 3)
+    cursor.execute("SELECT cast(%s as date) as f1", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_bool_roundtrip(cursor):
+    b = True
+    cursor.execute("SELECT cast(%s as bool) as f1", (b,))
+    assert cursor.fetchall()[0][0] is b
+
+
+def test_null_roundtrip(cursor):
+    cursor.execute("select current_setting('server_version')")
+    version = cursor.fetchall()[0][0][:2]
+
+    if version.startswith("9"):
+        # Prior to PostgreSQL version 10 we can't just "SELECT %s" and set
+        # None as the parameter, since it has no type.  That would result
+        # in a PG error, "could not determine data type of parameter %s".
+        # So we create a temporary table, insert null values, and read them
+        # back.
+        cursor.execute(
+            "CREATE TEMPORARY TABLE TestNullWrite "
+            "(f1 int4, f2 timestamp, f3 varchar)"
+        )
+        cursor.execute(
+            "INSERT INTO TestNullWrite VALUES (%s, %s, %s)", (None, None, None)
+        )
+        cursor.execute("SELECT * FROM TestNullWrite")
+        assert cursor.fetchall()[0] == [None, None, None]
+
+        with pytest.raises(pg8000.exceptions.DatabaseError):
+            cursor.execute("SELECT %s as f1", (None,))
+    else:
+        cursor.execute("SELECT %s", (None,))
+        assert cursor.fetchall()[0][0] is None
+
+
+def test_decimal_roundtrip(cursor):
+    values = ("1.1", "-1.1", "10000", "20000", "-1000000000.123456789", "1.0", "12.44")
+    for v in values:
+        cursor.execute("SELECT CAST(%s AS NUMERIC)", (decimal.Decimal(v),))
+        retval = cursor.fetchall()
+        assert str(retval[0][0]) == v
+
+
+def test_float_roundtrip(cursor):
+    val = 1.756e-12
+    cursor.execute("SELECT cast(%s as double precision)", (val,))
+    assert cursor.fetchall()[0][0] == val
+
+
+def test_float_plus_infinity_roundtrip(cursor):
+    v = float("inf")
+    cursor.execute("SELECT cast(%s as double precision)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_str_roundtrip(cursor):
+    v = "hello world"
+    cursor.execute("create temporary table test_str (f character varying(255))")
+    cursor.execute("INSERT INTO test_str VALUES (%s)", (v,))
+    cursor.execute("SELECT * from test_str")
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_str_then_int(cursor):
+    v1 = "hello world"
+    cursor.execute("SELECT cast(%s as varchar) as f1", (v1,))
+    assert cursor.fetchall()[0][0] == v1
+
+    v2 = 1
+    cursor.execute("SELECT cast(%s as varchar) as f1", (v2,))
+    assert cursor.fetchall()[0][0] == str(v2)
+
+
+def test_unicode_roundtrip(cursor):
+    v = "hello \u0173 world"
+    cursor.execute("SELECT cast(%s as varchar) as f1", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_long_roundtrip(cursor):
+    v = 50000000000000
+    cursor.execute("SELECT cast(%s as bigint)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_int_execute_many_select(cursor):
+    cursor.executemany("SELECT CAST(%s AS INTEGER)", ((1,), (40000,)))
+    cursor.fetchall()
+
+
+def test_int_execute_many_insert(cursor):
+    v = ([None], [4])
+    cursor.execute("create temporary table test_int (f integer)")
+    cursor.executemany("INSERT INTO test_int VALUES (%s)", v)
+    cursor.execute("SELECT * from test_int")
+    assert cursor.fetchall() == v
+
+
+def test_insert_null(cursor):
+    v = None
+    cursor.execute("CREATE TEMPORARY TABLE test_int (f INTEGER)")
+    cursor.execute("INSERT INTO test_int VALUES (%s)", (v,))
+    cursor.execute("SELECT * FROM test_int")
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_int_roundtrip(con, cursor):
+    int2 = 21
+    int4 = 23
+    int8 = 20
+
+    MAP = {
+        int2: "int2",
+        int4: "int4",
+        int8: "int8",
+    }
+
+    test_values = [
+        (0, int2),
+        (-32767, int2),
+        (-32768, int4),
+        (+32767, int2),
+        (+32768, int4),
+        (-2147483647, int4),
+        (-2147483648, int8),
+        (+2147483647, int4),
+        (+2147483648, int8),
+        (-9223372036854775807, int8),
+        (+9223372036854775807, int8),
+    ]
+
+    for value, typoid in test_values:
+        cursor.execute("SELECT cast(%s as " + MAP[typoid] + ")", (value,))
+        assert cursor.fetchall()[0][0] == value
+        column_name, column_typeoid = cursor.description[0][0:2]
+        assert column_typeoid == typoid
+
+
+def test_bytea_roundtrip(cursor):
+    cursor.execute(
+        "SELECT cast(%s as bytea)", (pg8000.Binary(b"\x00\x01\x02\x03\x02\x01\x00"),)
+    )
+    assert cursor.fetchall()[0][0] == b"\x00\x01\x02\x03\x02\x01\x00"
+
+
+def test_bytearray_round_trip(cursor):
+    binary = b"\x00\x01\x02\x03\x02\x01\x00"
+    cursor.execute("SELECT cast(%s as bytea)", (bytearray(binary),))
+    assert cursor.fetchall()[0][0] == binary
+
+
+def test_bytearray_subclass_round_trip(cursor):
+    class BClass(bytearray):
+        pass
+
+    binary = b"\x00\x01\x02\x03\x02\x01\x00"
+    cursor.execute("SELECT cast(%s as bytea)", (BClass(binary),))
+    assert cursor.fetchall()[0][0] == binary
+
+
+def test_timestamp_roundtrip(is_java, cursor):
+    v = Datetime(2001, 2, 3, 4, 5, 6, 170000)
+    cursor.execute("SELECT cast(%s as timestamp)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+    # Test that time zone doesn't affect it
+    # Jython 2.5.3 doesn't have a time.tzset() so skip
+    if not is_java:
+        orig_tz = os.environ.get("TZ")
+        os.environ["TZ"] = "America/Edmonton"
+        time.tzset()
+
+        cursor.execute("SELECT cast(%s as timestamp)", (v,))
+        assert cursor.fetchall()[0][0] == v
+
+        if orig_tz is None:
+            del os.environ["TZ"]
+        else:
+            os.environ["TZ"] = orig_tz
+        time.tzset()
+
+
+def test_pg_interval_repr():
+    v = PGInterval(microseconds=123456789, days=2, months=24)
+    assert repr(v) == "<PGInterval 24 months 2 days 123456789 microseconds>"
+
+
+def test_pg_interval_in_1_year():
+    assert pg_interval_in("1 year") == PGInterval(years=1)
+
+
+def test_interval_in_2_months():
+    assert interval_in("2 hours")
+
+
+def test_pg_interval_roundtrip(con, cursor):
+    con.register_in_adapter(INTERVAL, pg_interval_in)
+    con.register_out_adapter(PGInterval, pg_interval_out)
+    v = PGInterval(microseconds=123456789, days=2, months=24)
+    cursor.execute("SELECT cast(%s as interval)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_interval_roundtrip(cursor):
+    v = Timedelta(seconds=30)
+    cursor.execute("SELECT cast(%s as interval)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_enum_str_round_trip(cursor):
+    try:
+        cursor.execute("create type lepton as enum ('electron', 'muon', 'tau')")
+
+        v = "muon"
+        cursor.execute("SELECT cast(%s as lepton) as f1", (v,))
+        retval = cursor.fetchall()
+        assert retval[0][0] == v
+        cursor.execute("CREATE TEMPORARY TABLE testenum (f1 lepton)")
+        cursor.execute(
+            "INSERT INTO testenum VALUES (cast(%s as lepton))", ("electron",)
+        )
+    finally:
+        cursor.execute("drop table testenum")
+        cursor.execute("drop type lepton")
+
+
+def test_enum_custom_round_trip(con, cursor):
+    class Lepton:
+        # Implements PEP 435 in the minimal fashion needed
+        __members__ = OrderedDict()
+
+        def __init__(self, name, value, alias=None):
+            self.name = name
+            self.value = value
+            self.__members__[name] = self
+            setattr(self.__class__, name, self)
+            if alias:
+                self.__members__[alias] = self
+                setattr(self.__class__, alias, self)
+
+    def lepton_out(lepton):
+        return lepton.value
+
+    try:
+        cursor.execute("create type lepton as enum ('1', '2', '3')")
+        con.register_out_adapter(Lepton, lepton_out)
+
+        v = Lepton("muon", "2")
+        cursor.execute("SELECT CAST(%s AS lepton)", (v,))
+        assert cursor.fetchall()[0][0] == v.value
+    finally:
+        cursor.execute("drop type lepton")
+
+
+def test_enum_py_round_trip(cursor):
+    class Lepton(Enum):
+        electron = "1"
+        muon = "2"
+        tau = "3"
+
+    try:
+        cursor.execute("create type lepton as enum ('1', '2', '3')")
+
+        v = Lepton.muon
+        cursor.execute("SELECT cast(%s as lepton) as f1", (v,))
+        assert cursor.fetchall()[0][0] == v.value
+
+        cursor.execute("CREATE TEMPORARY TABLE testenum (f1 lepton)")
+        cursor.execute(
+            "INSERT INTO testenum VALUES (cast(%s as lepton))", (Lepton.electron,)
+        )
+    finally:
+        cursor.execute("drop table testenum")
+        cursor.execute("drop type lepton")
+
+
+def test_xml_roundtrip(cursor):
+    v = "<genome>gatccgagtac</genome>"
+    cursor.execute("select xmlparse(content %s) as f1", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_uuid_roundtrip(cursor):
+    v = uuid.UUID("911460f2-1f43-fea2-3e2c-e01fd5b5069d")
+    cursor.execute("select cast(%s as uuid)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_inet_roundtrip_network(cursor):
+    v = ipaddress.ip_network("192.168.0.0/28")
+    cursor.execute("select cast(%s as inet)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_inet_roundtrip_address(cursor):
+    v = ipaddress.ip_address("192.168.0.1")
+    cursor.execute("select cast(%s as inet)", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_xid_roundtrip(cursor):
+    v = 86722
+    cursor.execute("select cast(cast(%s as varchar) as xid) as f1", (v,))
+    retval = cursor.fetchall()
+    assert retval[0][0] == v
+
+    # Should complete without an exception
+    cursor.execute("select * from pg_locks where transactionid = %s", (97712,))
+    retval = cursor.fetchall()
+
+
+def test_int2vector_in(cursor):
+    cursor.execute("select cast('1 2' as int2vector) as f1")
+    assert cursor.fetchall()[0][0] == [1, 2]
+
+    # Should complete without an exception
+    cursor.execute("select indkey from pg_index")
+    cursor.fetchall()
+
+
+def test_timestamp_tz_out(cursor):
+    cursor.execute(
+        "SELECT '2001-02-03 04:05:06.17 America/Edmonton'" "::timestamp with time zone"
+    )
+    retval = cursor.fetchall()
+    dt = retval[0][0]
+    assert dt.tzinfo is not None, "no tzinfo returned"
+    assert dt.astimezone(Timezone.utc) == Datetime(
+        2001, 2, 3, 11, 5, 6, 170000, Timezone.utc
+    ), "retrieved value match failed"
+
+
+def test_timestamp_tz_roundtrip(is_java, cursor):
+    if not is_java:
+        mst = pytz.timezone("America/Edmonton")
+        v1 = mst.localize(Datetime(2001, 2, 3, 4, 5, 6, 170000))
+        cursor.execute("SELECT cast(%s as timestamptz)", (v1,))
+        v2 = cursor.fetchall()[0][0]
+        assert v2.tzinfo is not None
+        assert v1 == v2
+
+
+def test_timestamp_mismatch(is_java, cursor):
+    if not is_java:
+        mst = pytz.timezone("America/Edmonton")
+        cursor.execute("SET SESSION TIME ZONE 'America/Edmonton'")
+        try:
+            cursor.execute(
+                "CREATE TEMPORARY TABLE TestTz "
+                "(f1 timestamp with time zone, "
+                "f2 timestamp without time zone)"
+            )
+            cursor.execute(
+                "INSERT INTO TestTz (f1, f2) VALUES (%s, %s)",
+                (
+                    # insert timestamp into timestamptz field (v1)
+                    Datetime(2001, 2, 3, 4, 5, 6, 170000),
+                    # insert timestamptz into timestamp field (v2)
+                    mst.localize(Datetime(2001, 2, 3, 4, 5, 6, 170000)),
+                ),
+            )
+            cursor.execute("SELECT f1, f2 FROM TestTz")
+            retval = cursor.fetchall()
+
+            # when inserting a timestamp into a timestamptz field,
+            # postgresql assumes that it is in local time. So the value
+            # that comes out will be the server's local time interpretation
+            # of v1. We've set the server's TZ to MST, the time should
+            # be...
+            f1 = retval[0][0]
+            assert f1 == Datetime(2001, 2, 3, 11, 5, 6, 170000, Timezone.utc)
+
+            # inserting the timestamptz into a timestamp field, pg8000
+            # converts the value into UTC, and then the PG server converts
+            # it into local time for insertion into the field. When we
+            # query for it, we get the same time back, like the tz was
+            # dropped.
+            f2 = retval[0][1]
+            assert f2 == Datetime(2001, 2, 3, 11, 5, 6, 170000)
+        finally:
+            cursor.execute("SET SESSION TIME ZONE DEFAULT")
+
+
+def test_name_out(cursor):
+    # select a field that is of "name" type:
+    cursor.execute("SELECT usename FROM pg_user")
+    cursor.fetchall()
+    # It is sufficient that no errors were encountered.
+
+
+def test_oid_out(cursor):
+    cursor.execute("SELECT oid FROM pg_type")
+    cursor.fetchall()
+    # It is sufficient that no errors were encountered.
+
+
+def test_boolean_in(cursor):
+    cursor.execute("SELECT cast('t' as bool)")
+    assert cursor.fetchall()[0][0]
+
+
+def test_numeric_out(cursor):
+    for num in ("5000", "50.34"):
+        cursor.execute("SELECT " + num + "::numeric")
+        assert str(cursor.fetchall()[0][0]) == num
+
+
+def test_int2_out(cursor):
+    cursor.execute("SELECT 5000::smallint")
+    assert cursor.fetchall()[0][0] == 5000
+
+
+def test_int4_out(cursor):
+    cursor.execute("SELECT 5000::integer")
+    assert cursor.fetchall()[0][0] == 5000
+
+
+def test_int8_out(cursor):
+    cursor.execute("SELECT 50000000000000::bigint")
+    assert cursor.fetchall()[0][0] == 50000000000000
+
+
+def test_float4_out(cursor):
+    cursor.execute("SELECT 1.1::real")
+    assert cursor.fetchall()[0][0] == 1.1
+
+
+def test_float8_out(cursor):
+    cursor.execute("SELECT 1.1::double precision")
+    assert cursor.fetchall()[0][0] == 1.1000000000000001
+
+
+def test_varchar_out(cursor):
+    cursor.execute("SELECT 'hello'::varchar(20)")
+    assert cursor.fetchall()[0][0] == "hello"
+
+
+def test_char_out(cursor):
+    cursor.execute("SELECT 'hello'::char(20)")
+    assert cursor.fetchall()[0][0] == "hello               "
+
+
+def test_text_out(cursor):
+    cursor.execute("SELECT 'hello'::text")
+    assert cursor.fetchall()[0][0] == "hello"
+
+
+def test_interval_in(con, cursor):
+    con.register_in_adapter(INTERVAL, pg_interval_in)
+    cursor.execute("SELECT '1 month 16 days 12 hours 32 minutes 64 seconds'::interval")
+    expected_value = PGInterval(
+        microseconds=(12 * 60 * 60 * 1000 * 1000)
+        + (32 * 60 * 1000 * 1000)
+        + (64 * 1000 * 1000),
+        days=16,
+        months=1,
+    )
+    assert cursor.fetchall()[0][0] == expected_value
+
+
+def test_interval_in_30_seconds(cursor):
+    cursor.execute("select interval '30 seconds'")
+    assert cursor.fetchall()[0][0] == Timedelta(seconds=30)
+
+
+def test_interval_in_12_days_30_seconds(cursor):
+    cursor.execute("select interval '12 days 30 seconds'")
+    assert cursor.fetchall()[0][0] == Timedelta(days=12, seconds=30)
+
+
+def test_timestamp_out(cursor):
+    cursor.execute("SELECT '2001-02-03 04:05:06.17'::timestamp")
+    retval = cursor.fetchall()
+    assert retval[0][0] == Datetime(2001, 2, 3, 4, 5, 6, 170000)
+
+
+def test_int4_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::INT[] AS f1, '{{1,2,3},{4,5,6}}'::INT[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int2_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::INT2[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::INT2[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT2[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int8_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::INT8[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::INT8[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT8[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_bool_array_out(cursor):
+    cursor.execute(
+        "SELECT '{TRUE,FALSE,FALSE,TRUE}'::BOOL[] AS f1, "
+        "'{{TRUE,FALSE,TRUE},{FALSE,TRUE,FALSE}}'::BOOL[][] AS f2, "
+        "'{{{TRUE,FALSE},{FALSE,TRUE}},{{NULL,TRUE},{FALSE,FALSE}}}'"
+        "::BOOL[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [True, False, False, True]
+    assert f2 == [[True, False, True], [False, True, False]]
+    assert f3 == [[[True, False], [False, True]], [[None, True], [False, False]]]
+
+
+def test_float4_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::FLOAT4[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::FLOAT4[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::FLOAT4[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_float8_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::FLOAT8[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::FLOAT8[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::FLOAT8[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int_array_roundtrip_small(cursor):
+    """send small int array, should be sent as INT2[]"""
+    cursor.execute("SELECT cast(%s as int2[])", ([1, 2, 3],))
+    assert cursor.fetchall()[0][0], [1, 2, 3]
+    column_name, column_typeoid = cursor.description[0][0:2]
+    assert column_typeoid == 1005, "type should be INT2[]"
+
+
+def test_int_array_roundtrip_multi(cursor):
+    """test multi-dimensional array, should be sent as INT2[]"""
+    cursor.execute("SELECT cast(%s as int2[])", ([[1, 2], [3, 4]],))
+    assert cursor.fetchall()[0][0] == [[1, 2], [3, 4]]
+
+    column_name, column_typeoid = cursor.description[0][0:2]
+    assert column_typeoid == 1005, "type should be INT2[]"
+
+
+def test_int4_array_roundtrip(cursor):
+    """a larger value should kick it up to INT4[]..."""
+    cursor.execute("SELECT cast(%s as int4[])", ([70000, 2, 3],))
+    assert cursor.fetchall()[0][0] == [70000, 2, 3]
+    column_name, column_typeoid = cursor.description[0][0:2]
+    assert column_typeoid == 1007, "type should be INT4[]"
+
+
+def test_int8_array_roundtrip(cursor):
+    """a much larger value should kick it up to INT8[]..."""
+    cursor.execute("SELECT cast(%s as int8[])", ([7000000000, 2, 3],))
+    assert cursor.fetchall()[0][0] == [7000000000, 2, 3], "retrieved value match failed"
+    column_name, column_typeoid = cursor.description[0][0:2]
+    assert column_typeoid == 1016, "type should be INT8[]"
+
+
+def test_int_array_with_null_roundtrip(cursor):
+    cursor.execute("SELECT cast(%s as int[])", ([1, None, 3],))
+    assert cursor.fetchall()[0][0] == [1, None, 3]
+
+
+def test_float_array_roundtrip(cursor):
+    cursor.execute("SELECT cast(%s as double precision[])", ([1.1, 2.2, 3.3],))
+    assert cursor.fetchall()[0][0] == [1.1, 2.2, 3.3]
+
+
+def test_bool_array_roundtrip(cursor):
+    cursor.execute("SELECT cast(%s as bool[])", ([True, False, None],))
+    assert cursor.fetchall()[0][0] == [True, False, None]
+
+
+@pytest.mark.parametrize(
+    "test_input,expected",
+    [
+        ("SELECT '{a,b,c}'::TEXT[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::CHAR[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::VARCHAR[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::CSTRING[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::NAME[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{}'::text[];", []),
+        ('SELECT \'{NULL,"NULL",NULL,""}\'::text[];', [None, "NULL", None, ""]),
+    ],
+)
+def test_string_array_out(cursor, test_input, expected):
+    cursor.execute(test_input)
+    assert cursor.fetchall()[0][0] == expected
+
+
+def test_numeric_array_out(cursor):
+    cursor.execute("SELECT '{1.1,2.2,3.3}'::numeric[] AS f1")
+    assert cursor.fetchone()[0] == [
+        decimal.Decimal("1.1"),
+        decimal.Decimal("2.2"),
+        decimal.Decimal("3.3"),
+    ]
+
+
+def test_numeric_array_roundtrip(cursor):
+    v = [decimal.Decimal("1.1"), None, decimal.Decimal("3.3")]
+    cursor.execute("SELECT cast(%s as numeric[])", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_string_array_roundtrip(cursor):
+    v = [
+        "Hello!",
+        "World!",
+        "abcdefghijklmnopqrstuvwxyz",
+        "",
+        "A bunch of random characters:",
+        " ~!@#$%^&*()_+`1234567890-=[]\\{}|{;':\",./<>?\t",
+        None,
+    ]
+    cursor.execute("SELECT cast(%s as varchar[])", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_array_string_escape():
+    v = '"'
+    res = pg8000.converters.array_string_escape(v)
+    assert res == '"\\""'
+
+
+def test_empty_array(cursor):
+    v = []
+    cursor.execute("SELECT cast(%s as varchar[])", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_macaddr(cursor):
+    cursor.execute("SELECT macaddr '08002b:010203'")
+    assert cursor.fetchall()[0][0] == "08:00:2b:01:02:03"
+
+
+def test_tsvector_roundtrip(cursor):
+    cursor.execute(
+        "SELECT cast(%s as tsvector)", ("a fat cat sat on a mat and ate a fat rat",)
+    )
+    retval = cursor.fetchall()
+    assert retval[0][0] == "'a' 'and' 'ate' 'cat' 'fat' 'mat' 'on' 'rat' 'sat'"
+
+
+def test_hstore_roundtrip(cursor):
+    val = '"a"=>"1"'
+    cursor.execute("SELECT cast(%s as hstore)", (val,))
+    assert cursor.fetchall()[0][0] == val
+
+
+def test_json_roundtrip(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as jsonb)", (dumps(val),))
+    assert cursor.fetchall()[0][0] == val
+
+
+def test_jsonb_roundtrip(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as jsonb)", (dumps(val),))
+    retval = cursor.fetchall()
+    assert retval[0][0] == val
+
+
+def test_json_access_object(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as json) -> %s", (dumps(val), "name"))
+    retval = cursor.fetchall()
+    assert retval[0][0] == "Apollo 11 Cave"
+
+
+def test_jsonb_access_object(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as jsonb) -> %s", (dumps(val), "name"))
+    retval = cursor.fetchall()
+    assert retval[0][0] == "Apollo 11 Cave"
+
+
+def test_json_access_array(cursor):
+    val = [-1, -2, -3, -4, -5]
+    cursor.execute("SELECT cast(%s as json) -> cast(%s as int)", (dumps(val), 2))
+    assert cursor.fetchall()[0][0] == -3
+
+
+def test_jsonb_access_array(cursor):
+    val = [-1, -2, -3, -4, -5]
+    cursor.execute("SELECT cast(%s as jsonb) -> cast(%s as int)", (dumps(val), 2))
+    assert cursor.fetchall()[0][0] == -3
+
+
+def test_jsonb_access_path(cursor):
+    j = {"a": [1, 2, 3], "b": [4, 5, 6]}
+
+    path = ["a", "2"]
+
+    cursor.execute("SELECT cast(%s as jsonb) #>> %s", (dumps(j), path))
+    assert cursor.fetchall()[0][0] == str(j[path[0]][int(path[1])])
+
+
+def test_infinity_timestamp_roundtrip(cursor):
+    v = "infinity"
+    cursor.execute("SELECT cast(%s as timestamp) as f1", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_point_roundtrip(cursor):
+    v = "(2.3,1)"
+    cursor.execute("SELECT cast(%s as point) as f1", (v,))
+    assert cursor.fetchall()[0][0] == v
+
+
+def test_time_in():
+    actual = pg8000.converters.time_in("12:57:18.000396")
+    assert actual == Time(12, 57, 18, 396)
diff -pruN 1.10.6-3/test/legacy/conftest.py 1.23.0-0ubuntu1/test/legacy/conftest.py
--- 1.10.6-3/test/legacy/conftest.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/conftest.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,58 @@
+import sys
+from os import environ
+
+import pytest
+
+import pg8000
+
+
+@pytest.fixture(scope="class")
+def db_kwargs():
+    db_connect = {"user": "postgres", "password": "pw"}
+
+    for kw, var, f in [
+        ("host", "PGHOST", str),
+        ("password", "PGPASSWORD", str),
+        ("port", "PGPORT", int),
+    ]:
+        try:
+            db_connect[kw] = f(environ[var])
+        except KeyError:
+            pass
+
+    return db_connect
+
+
+@pytest.fixture
+def con(request, db_kwargs):
+    conn = pg8000.connect(**db_kwargs)
+
+    def fin():
+        try:
+            conn.rollback()
+        except pg8000.InterfaceError:
+            pass
+
+        try:
+            conn.close()
+        except pg8000.InterfaceError:
+            pass
+
+    request.addfinalizer(fin)
+    return conn
+
+
+@pytest.fixture
+def cursor(request, con):
+    cursor = con.cursor()
+
+    def fin():
+        cursor.close()
+
+    request.addfinalizer(fin)
+    return cursor
+
+
+@pytest.fixture
+def is_java():
+    return "java" in sys.platform.lower()
diff -pruN 1.10.6-3/test/legacy/github-actions/gss_legacy.py 1.23.0-0ubuntu1/test/legacy/github-actions/gss_legacy.py
--- 1.10.6-3/test/legacy/github-actions/gss_legacy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/github-actions/gss_legacy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,14 @@
+import pytest
+
+import pg8000.dbapi
+
+
+def test_gss(db_kwargs):
+    """Called by GitHub Actions with auth method gss."""
+
+    # Should raise an exception saying gss isn't supported
+    with pytest.raises(
+        pg8000.dbapi.InterfaceError,
+        match="Authentication method 7 not supported by pg8000.",
+    ):
+        pg8000.dbapi.connect(**db_kwargs)
diff -pruN 1.10.6-3/test/legacy/github-actions/md5_legacy.py 1.23.0-0ubuntu1/test/legacy/github-actions/md5_legacy.py
--- 1.10.6-3/test/legacy/github-actions/md5_legacy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/github-actions/md5_legacy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_md5(con):
+    """Called by GitHub Actions with auth method md5.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/legacy/github-actions/password_legacy.py 1.23.0-0ubuntu1/test/legacy/github-actions/password_legacy.py
--- 1.10.6-3/test/legacy/github-actions/password_legacy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/github-actions/password_legacy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_password(con):
+    """Called by GitHub Actions with auth method password.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/legacy/github-actions/scram-sha-256_legacy.py 1.23.0-0ubuntu1/test/legacy/github-actions/scram-sha-256_legacy.py
--- 1.10.6-3/test/legacy/github-actions/scram-sha-256_legacy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/github-actions/scram-sha-256_legacy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_scram_sha_256(con):
+    """Called by GitHub Actions with auth method scram-sha-256.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/legacy/github-actions/ssl_legacy.py 1.23.0-0ubuntu1/test/legacy/github-actions/ssl_legacy.py
--- 1.10.6-3/test/legacy/github-actions/ssl_legacy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/github-actions/ssl_legacy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,63 @@
+import ssl
+import sys
+
+import pytest
+
+import pg8000.dbapi
+
+
+# Check if running in Jython
+if "java" in sys.platform:
+    from javax.net.ssl import TrustManager, X509TrustManager
+    from jarray import array
+    from javax.net.ssl import SSLContext
+
+    class TrustAllX509TrustManager(X509TrustManager):
+        """Define a custom TrustManager which will blindly accept all
+        certificates"""
+
+        def checkClientTrusted(self, chain, auth):
+            pass
+
+        def checkServerTrusted(self, chain, auth):
+            pass
+
+        def getAcceptedIssuers(self):
+            return None
+
+    # Create a static reference to an SSLContext which will use
+    # our custom TrustManager
+    trust_managers = array([TrustAllX509TrustManager()], TrustManager)
+    TRUST_ALL_CONTEXT = SSLContext.getInstance("SSL")
+    TRUST_ALL_CONTEXT.init(None, trust_managers, None)
+    # Keep a static reference to the JVM's default SSLContext for restoring
+    # at a later time
+    DEFAULT_CONTEXT = SSLContext.getDefault()
+
+
+@pytest.fixture
+def trust_all_certificates(request):
+    """Decorator function that will make it so the context of the decorated
+    method will run with our TrustManager that accepts all certificates"""
+    # Only do this if running under Jython
+    is_java = "java" in sys.platform
+
+    if is_java:
+        from javax.net.ssl import SSLContext
+
+        SSLContext.setDefault(TRUST_ALL_CONTEXT)
+
+    def fin():
+        if is_java:
+            SSLContext.setDefault(DEFAULT_CONTEXT)
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.usefixtures("trust_all_certificates")
+def test_ssl(db_kwargs):
+    context = ssl.SSLContext()
+    context.check_hostname = False
+    db_kwargs["ssl_context"] = context
+    with pg8000.dbapi.connect(**db_kwargs):
+        pass
diff -pruN 1.10.6-3/test/legacy/stress.py 1.23.0-0ubuntu1/test/legacy/stress.py
--- 1.10.6-3/test/legacy/stress.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/stress.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,26 @@
+from contextlib import closing
+
+import pg8000
+from pg8000.tests.connection_settings import db_connect
+
+
+with closing(pg8000.connect(**db_connect)) as db:
+    for i in range(100):
+        cursor = db.cursor()
+        cursor.execute(
+            """
+        SELECT n.nspname as "Schema",
+          pg_catalog.format_type(t.oid, NULL) AS "Name",
+            pg_catalog.obj_description(t.oid, 'pg_type') as "Description"
+            FROM pg_catalog.pg_type t
+                 LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+                 left join pg_catalog.pg_namespace kj on n.oid = t.typnamespace
+                 WHERE (t.typrelid = 0
+                    OR (SELECT c.relkind = 'c'
+                        FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))
+                    AND NOT EXISTS(
+                        SELECT 1 FROM pg_catalog.pg_type el
+                        WHERE el.oid = t.typelem AND el.typarray = t.oid)
+                     AND pg_catalog.pg_type_is_visible(t.oid)
+                     ORDER BY 1, 2;"""
+        )
diff -pruN 1.10.6-3/test/legacy/test_auth.py 1.23.0-0ubuntu1/test/legacy/test_auth.py
--- 1.10.6-3/test/legacy/test_auth.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_auth.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,77 @@
+import ssl
+import sys
+
+import pytest
+
+import pg8000.legacy
+
+
+def testGss(db_kwargs):
+    """This requires a line in pg_hba.conf that requires gss for the database
+    pg8000_gss
+    """
+    db_kwargs["database"] = "pg8000_gss"
+
+    # Should raise an exception saying gss isn't supported
+    with pytest.raises(
+        pg8000.legacy.InterfaceError,
+        match="Authentication method 7 not supported by pg8000.",
+    ):
+        pg8000.legacy.connect(**db_kwargs)
+
+
+# Check if running in Jython
+if "java" in sys.platform:
+    from javax.net.ssl import TrustManager, X509TrustManager
+    from jarray import array
+    from javax.net.ssl import SSLContext
+
+    class TrustAllX509TrustManager(X509TrustManager):
+        """Define a custom TrustManager which will blindly accept all
+        certificates"""
+
+        def checkClientTrusted(self, chain, auth):
+            pass
+
+        def checkServerTrusted(self, chain, auth):
+            pass
+
+        def getAcceptedIssuers(self):
+            return None
+
+    # Create a static reference to an SSLContext which will use
+    # our custom TrustManager
+    trust_managers = array([TrustAllX509TrustManager()], TrustManager)
+    TRUST_ALL_CONTEXT = SSLContext.getInstance("SSL")
+    TRUST_ALL_CONTEXT.init(None, trust_managers, None)
+    # Keep a static reference to the JVM's default SSLContext for restoring
+    # at a later time
+    DEFAULT_CONTEXT = SSLContext.getDefault()
+
+
+@pytest.fixture
+def trust_all_certificates(request):
+    """Decorator function that will make it so the context of the decorated
+    method will run with our TrustManager that accepts all certificates"""
+    # Only do this if running under Jython
+    is_java = "java" in sys.platform
+
+    if is_java:
+        from javax.net.ssl import SSLContext
+
+        SSLContext.setDefault(TRUST_ALL_CONTEXT)
+
+    def fin():
+        if is_java:
+            SSLContext.setDefault(DEFAULT_CONTEXT)
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.usefixtures("trust_all_certificates")
+def test_ssl(db_kwargs):
+    context = ssl.SSLContext()
+    context.check_hostname = False
+    db_kwargs["ssl_context"] = context
+    with pg8000.connect(**db_kwargs):
+        pass
diff -pruN 1.10.6-3/test/legacy/test_benchmarks.py 1.23.0-0ubuntu1/test/legacy/test_benchmarks.py
--- 1.10.6-3/test/legacy/test_benchmarks.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_benchmarks.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,27 @@
+import pytest
+
+
+@pytest.mark.parametrize(
+    "txt",
+    (
+        ("int2", "cast(id / 100 as int2)"),
+        "cast(id as int4)",
+        "cast(id * 100 as int8)",
+        "(id % 2) = 0",
+        "N'Static text string'",
+        "cast(id / 100 as float4)",
+        "cast(id / 100 as float8)",
+        "cast(id / 100 as numeric)",
+        "timestamp '2001-09-28'",
+    ),
+)
+def test_round_trips(con, benchmark, txt):
+    def torun():
+        query = """SELECT {0} AS column1, {0} AS column2, {0} AS column3,
+            {0} AS column4, {0} AS column5, {0} AS column6, {0} AS column7
+            FROM (SELECT generate_series(1, 10000) AS id) AS tbl""".format(
+            txt
+        )
+        con.run(query)
+
+    benchmark(torun)
diff -pruN 1.10.6-3/test/legacy/test_connection.py 1.23.0-0ubuntu1/test/legacy/test_connection.py
--- 1.10.6-3/test/legacy/test_connection.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_connection.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,190 @@
+import pytest
+
+import pg8000
+
+
+def testUnixSocketMissing():
+    conn_params = {"unix_sock": "/file-does-not-exist", "user": "doesn't-matter"}
+
+    with pytest.raises(pg8000.InterfaceError):
+        pg8000.connect(**conn_params)
+
+
+def test_internet_socket_connection_refused():
+    conn_params = {"port": 0, "user": "doesn't-matter"}
+
+    with pytest.raises(
+        pg8000.InterfaceError,
+        match="Can't create a connection to host localhost and port 0 "
+        "\\(timeout is None and source_address is None\\).",
+    ):
+        pg8000.connect(**conn_params)
+
+
+def testDatabaseMissing(db_kwargs):
+    db_kwargs["database"] = "missing-db"
+    with pytest.raises(pg8000.ProgrammingError):
+        pg8000.connect(**db_kwargs)
+
+
+def test_notify(con):
+    backend_pid = con.run("select pg_backend_pid()")[0][0]
+    assert list(con.notifications) == []
+    con.run("LISTEN test")
+    con.run("NOTIFY test")
+    con.commit()
+
+    con.run("VALUES (1, 2), (3, 4), (5, 6)")
+    assert len(con.notifications) == 1
+    assert con.notifications[0] == (backend_pid, "test", "")
+
+
+def test_notify_with_payload(con):
+    backend_pid = con.run("select pg_backend_pid()")[0][0]
+    assert list(con.notifications) == []
+    con.run("LISTEN test")
+    con.run("NOTIFY test, 'Parnham'")
+    con.commit()
+
+    con.run("VALUES (1, 2), (3, 4), (5, 6)")
+    assert len(con.notifications) == 1
+    assert con.notifications[0] == (backend_pid, "test", "Parnham")
+
+
+# This requires a line in pg_hba.conf that requires md5 for the database
+# pg8000_md5
+
+
+def testMd5(db_kwargs):
+    db_kwargs["database"] = "pg8000_md5"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.ProgrammingError, match="3D000"):
+        pg8000.connect(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires 'password' for the
+# database pg8000_password
+
+
+def testPassword(db_kwargs):
+    db_kwargs["database"] = "pg8000_password"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.ProgrammingError, match="3D000"):
+        pg8000.connect(**db_kwargs)
+
+
+def testUnicodeDatabaseName(db_kwargs):
+    db_kwargs["database"] = "pg8000_sn\uFF6Fw"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.ProgrammingError, match="3D000"):
+        pg8000.connect(**db_kwargs)
+
+
+def testBytesDatabaseName(db_kwargs):
+    """Should only raise an exception saying db doesn't exist"""
+
+    db_kwargs["database"] = bytes("pg8000_sn\uFF6Fw", "utf8")
+    with pytest.raises(pg8000.ProgrammingError, match="3D000"):
+        pg8000.connect(**db_kwargs)
+
+
+def testBytesPassword(con, db_kwargs):
+    # Create user
+    username = "boltzmann"
+    password = "cha\uFF6Fs"
+    with con.cursor() as cur:
+        cur.execute("create user " + username + " with password '" + password + "';")
+        con.commit()
+
+        db_kwargs["user"] = username
+        db_kwargs["password"] = password.encode("utf8")
+        db_kwargs["database"] = "pg8000_md5"
+        with pytest.raises(pg8000.ProgrammingError, match="3D000"):
+            pg8000.connect(**db_kwargs)
+
+        cur.execute("drop role " + username)
+        con.commit()
+
+
+def test_broken_pipe_read(con, db_kwargs):
+    db1 = pg8000.legacy.connect(**db_kwargs)
+    cur1 = db1.cursor()
+    cur2 = con.cursor()
+    cur1.execute("select pg_backend_pid()")
+    pid1 = cur1.fetchone()[0]
+
+    cur2.execute("select pg_terminate_backend(%s)", (pid1,))
+    with pytest.raises(pg8000.exceptions.InterfaceError, match="network error on read"):
+        cur1.execute("select 1")
+
+
+def test_broken_pipe_flush(con, db_kwargs):
+    db1 = pg8000.legacy.connect(**db_kwargs)
+    cur1 = db1.cursor()
+    cur2 = con.cursor()
+    cur1.execute("select pg_backend_pid()")
+    pid1 = cur1.fetchone()[0]
+
+    cur2.execute("select pg_terminate_backend(%s)", (pid1,))
+    try:
+        cur1.execute("select 1")
+    except BaseException:
+        pass
+
+    # Can do an assert_raises when we're on 3.8 or above
+    try:
+        db1.close()
+    except pg8000.exceptions.InterfaceError as e:
+        assert str(e) == "network error on flush"
+
+
+def test_broken_pipe_unpack(con):
+    cur = con.cursor()
+    cur.execute("select pg_backend_pid()")
+    pid1 = cur.fetchone()[0]
+
+    with pytest.raises(pg8000.legacy.InterfaceError, match="network error"):
+        cur.execute("select pg_terminate_backend(%s)", (pid1,))
+
+
+def testApplicatioName(db_kwargs):
+    app_name = "my test application name"
+    db_kwargs["application_name"] = app_name
+    with pg8000.connect(**db_kwargs) as db:
+        cur = db.cursor()
+        cur.execute(
+            "select application_name from pg_stat_activity "
+            " where pid = pg_backend_pid()"
+        )
+
+        application_name = cur.fetchone()[0]
+        assert application_name == app_name
+
+
+def test_application_name_integer(db_kwargs):
+    db_kwargs["application_name"] = 1
+    with pytest.raises(
+        pg8000.InterfaceError,
+        match="The parameter application_name can't be of type <class 'int'>.",
+    ):
+        pg8000.connect(**db_kwargs)
+
+
+def test_application_name_bytearray(db_kwargs):
+    db_kwargs["application_name"] = bytearray(b"Philby")
+    pg8000.connect(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires scram-sha-256 for the
+# database scram-sha-256
+
+
+def test_scram_sha_256(db_kwargs):
+    db_kwargs["database"] = "pg8000_scram_sha_256"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.ProgrammingError, match="3D000"):
+        pg8000.connect(**db_kwargs)
diff -pruN 1.10.6-3/test/legacy/test_copy.py 1.23.0-0ubuntu1/test/legacy/test_copy.py
--- 1.10.6-3/test/legacy/test_copy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_copy.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,90 @@
+from io import BytesIO
+
+import pytest
+
+
+@pytest.fixture
+def db_table(request, con):
+    with con.cursor() as cursor:
+        cursor.execute(
+            "CREATE TEMPORARY TABLE t1 (f1 int primary key, "
+            "f2 int not null, f3 varchar(50) null) "
+            "on commit drop"
+        )
+    return con
+
+
+def test_copy_to_with_table(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, 1))
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 2, 2))
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 3, 3))
+
+        stream = BytesIO()
+        cursor.execute("copy t1 to stdout", stream=stream)
+        assert stream.getvalue() == b"1\t1\t1\n2\t2\t2\n3\t3\t3\n"
+        assert cursor.rowcount == 3
+
+
+def test_copy_to_with_query(db_table):
+    with db_table.cursor() as cursor:
+        stream = BytesIO()
+        cursor.execute(
+            "COPY (SELECT 1 as One, 2 as Two) TO STDOUT WITH DELIMITER "
+            "'X' CSV HEADER QUOTE AS 'Y' FORCE QUOTE Two",
+            stream=stream,
+        )
+        assert stream.getvalue() == b"oneXtwo\n1XY2Y\n"
+        assert cursor.rowcount == 1
+
+
+def test_copy_from_with_table(db_table):
+    with db_table.cursor() as cursor:
+        stream = BytesIO(b"1\t1\t1\n2\t2\t2\n3\t3\t3\n")
+        cursor.execute("copy t1 from STDIN", stream=stream)
+        assert cursor.rowcount == 3
+
+        cursor.execute("SELECT * FROM t1 ORDER BY f1")
+        retval = cursor.fetchall()
+        assert retval == ([1, 1, "1"], [2, 2, "2"], [3, 3, "3"])
+
+
+def test_copy_from_with_query(db_table):
+    with db_table.cursor() as cursor:
+        stream = BytesIO(b"f1Xf2\n1XY1Y\n")
+        cursor.execute(
+            "COPY t1 (f1, f2) FROM STDIN WITH DELIMITER 'X' CSV HEADER "
+            "QUOTE AS 'Y' FORCE NOT NULL f1",
+            stream=stream,
+        )
+        assert cursor.rowcount == 1
+
+        cursor.execute("SELECT * FROM t1 ORDER BY f1")
+        retval = cursor.fetchall()
+        assert retval == ([1, 1, None],)
+
+
+def test_copy_from_with_error(db_table):
+    with db_table.cursor() as cursor:
+        stream = BytesIO(b"f1Xf2\n\n1XY1Y\n")
+        with pytest.raises(BaseException) as e:
+            cursor.execute(
+                "COPY t1 (f1, f2) FROM STDIN WITH DELIMITER 'X' CSV HEADER "
+                "QUOTE AS 'Y' FORCE NOT NULL f1",
+                stream=stream,
+            )
+
+        arg = {
+            "S": ("ERROR",),
+            "C": ("22P02",),
+            "M": (
+                'invalid input syntax for type integer: ""',
+                'invalid input syntax for integer: ""',
+            ),
+            "W": ('COPY t1, line 2, column f1: ""',),
+            "F": ("numutils.c",),
+            "R": ("pg_atoi", "pg_strtoint32"),
+        }
+        earg = e.value.args[0]
+        for k, v in arg.items():
+            assert earg[k] in v
diff -pruN 1.10.6-3/test/legacy/test_dbapi20.py 1.23.0-0ubuntu1/test/legacy/test_dbapi20.py
--- 1.10.6-3/test/legacy/test_dbapi20.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_dbapi20.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,678 @@
+import time
+import warnings
+
+import pytest
+
+import pg8000
+
+""" Python DB API 2.0 driver compliance unit test suite.
+
+    This software is Public Domain and may be used without restrictions.
+
+ "Now we have booze and barflies entering the discussion, plus rumours of
+  DBAs on drugs... and I won't tell you what flashes through my mind each
+  time I read the subject line with 'Anal Compliance' in it.  All around
+  this is turning out to be a thoroughly unwholesome unit test."
+
+    -- Ian Bicking
+"""
+
+__rcs_id__ = "$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $"
+__version__ = "$Revision: 1.10 $"[11:-2]
+__author__ = "Stuart Bishop <zen@shangri-la.dropbear.id.au>"
+
+
+# $Log: dbapi20.py,v $
+# Revision 1.10  2003/10/09 03:14:14  zenzen
+# Add test for DB API 2.0 optional extension, where database exceptions
+# are exposed as attributes on the Connection object.
+#
+# Revision 1.9  2003/08/13 01:16:36  zenzen
+# Minor tweak from Stefan Fleiter
+#
+# Revision 1.8  2003/04/10 00:13:25  zenzen
+# Changes, as per suggestions by M.-A. Lemburg
+# - Add a table prefix, to ensure namespace collisions can always be avoided
+#
+# Revision 1.7  2003/02/26 23:33:37  zenzen
+# Break out DDL into helper functions, as per request by David Rushby
+#
+# Revision 1.6  2003/02/21 03:04:33  zenzen
+# Stuff from Henrik Ekelund:
+#     added test_None
+#     added test_nextset & hooks
+#
+# Revision 1.5  2003/02/17 22:08:43  zenzen
+# Implement suggestions and code from Henrik Eklund - test that
+# cursor.arraysize defaults to 1 & generic cursor.callproc test added
+#
+# Revision 1.4  2003/02/15 00:16:33  zenzen
+# Changes, as per suggestions and bug reports by M.-A. Lemburg,
+# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
+# - Class renamed
+# - Now a subclass of TestCase, to avoid requiring the driver stub
+#   to use multiple inheritance
+# - Reversed the polarity of buggy test in test_description
+# - Test exception heirarchy correctly
+# - self.populate is now self._populate(), so if a driver stub
+#   overrides self.ddl1 this change propogates
+# - VARCHAR columns now have a width, which will hopefully make the
+#   DDL even more portible (this will be reversed if it causes more problems)
+# - cursor.rowcount being checked after various execute and fetchXXX methods
+# - Check for fetchall and fetchmany returning empty lists after results
+#   are exhausted (already checking for empty lists if select retrieved
+#   nothing
+# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
+#
+
+
+""" Test a database self.driver for DB API 2.0 compatibility.
+    This implementation tests Gadfly, but the TestCase
+    is structured so that other self.drivers can subclass this
+    test case to ensure compiliance with the DB-API. It is
+    expected that this TestCase may be expanded in the future
+    if ambiguities or edge conditions are discovered.
+
+    The 'Optional Extensions' are not yet being tested.
+
+    self.drivers should subclass this test, overriding setUp, tearDown,
+    self.driver, connect_args and connect_kw_args. Class specification
+    should be as follows:
+
+    import dbapi20
+    class mytest(dbapi20.DatabaseAPI20Test):
+       [...]
+
+    Don't 'import DatabaseAPI20Test from dbapi20', or you will
+    confuse the unit tester - just 'import dbapi20'.
+"""
+
+# The self.driver module. This should be the module where the 'connect'
+# method is to be found
+driver = pg8000
+table_prefix = "dbapi20test_"  # If you need to specify a prefix for tables
+
+ddl1 = "create table %sbooze (name varchar(20))" % table_prefix
+ddl2 = "create table %sbarflys (name varchar(20))" % table_prefix
+xddl1 = "drop table %sbooze" % table_prefix
+xddl2 = "drop table %sbarflys" % table_prefix
+
+# Name of stored procedure to convert
+# string->lowercase
+lowerfunc = "lower"
+
+
+# Some drivers may need to override these helpers, for example adding
+# a 'commit' after the execute.
+def executeDDL1(cursor):
+    cursor.execute(ddl1)
+
+
+def executeDDL2(cursor):
+    cursor.execute(ddl2)
+
+
+@pytest.fixture
+def db(request, con):
+    def fin():
+        with con.cursor() as cur:
+            for ddl in (xddl1, xddl2):
+                try:
+                    cur.execute(ddl)
+                    con.commit()
+                except driver.Error:
+                    # Assume table didn't exist. Other tests will check if
+                    # execute is busted.
+                    pass
+
+    request.addfinalizer(fin)
+    return con
+
+
+def test_apilevel():
+    # Must exist
+    apilevel = driver.apilevel
+
+    # Must equal 2.0
+    assert apilevel == "2.0"
+
+
+def test_threadsafety():
+    try:
+        # Must exist
+        threadsafety = driver.threadsafety
+        # Must be a valid value
+        assert threadsafety in (0, 1, 2, 3)
+    except AttributeError:
+        assert False, "Driver doesn't define threadsafety"
+
+
+def test_paramstyle():
+    try:
+        # Must exist
+        paramstyle = driver.paramstyle
+        # Must be a valid value
+        assert paramstyle in ("qmark", "numeric", "named", "format", "pyformat")
+    except AttributeError:
+        assert False, "Driver doesn't define paramstyle"
+
+
+def test_Exceptions():
+    # Make sure required exceptions exist, and are in the
+    # defined heirarchy.
+    assert issubclass(driver.Warning, Exception)
+    assert issubclass(driver.Error, Exception)
+    assert issubclass(driver.InterfaceError, driver.Error)
+    assert issubclass(driver.DatabaseError, driver.Error)
+    assert issubclass(driver.OperationalError, driver.Error)
+    assert issubclass(driver.IntegrityError, driver.Error)
+    assert issubclass(driver.InternalError, driver.Error)
+    assert issubclass(driver.ProgrammingError, driver.Error)
+    assert issubclass(driver.NotSupportedError, driver.Error)
+
+
+def test_ExceptionsAsConnectionAttributes(con):
+    # OPTIONAL EXTENSION
+    # Test for the optional DB API 2.0 extension, where the exceptions
+    # are exposed as attributes on the Connection object
+    # I figure this optional extension will be implemented by any
+    # driver author who is using this test suite, so it is enabled
+    # by default.
+    warnings.simplefilter("ignore")
+    drv = driver
+    assert con.Warning is drv.Warning
+    assert con.Error is drv.Error
+    assert con.InterfaceError is drv.InterfaceError
+    assert con.DatabaseError is drv.DatabaseError
+    assert con.OperationalError is drv.OperationalError
+    assert con.IntegrityError is drv.IntegrityError
+    assert con.InternalError is drv.InternalError
+    assert con.ProgrammingError is drv.ProgrammingError
+    assert con.NotSupportedError is drv.NotSupportedError
+    warnings.resetwarnings()
+
+
+def test_commit(con):
+    # Commit must work, even if it doesn't do anything
+    con.commit()
+
+
+def test_rollback(con):
+    # If rollback is defined, it should either work or throw
+    # the documented exception
+    if hasattr(con, "rollback"):
+        try:
+            con.rollback()
+        except driver.NotSupportedError:
+            pass
+
+
+def test_cursor(con):
+    con.cursor()
+
+
+def test_cursor_isolation(con):
+    # Make sure cursors created from the same connection have
+    # the documented transaction isolation level
+    cur1 = con.cursor()
+    cur2 = con.cursor()
+    executeDDL1(cur1)
+    cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    cur2.execute("select name from %sbooze" % table_prefix)
+    booze = cur2.fetchall()
+    assert len(booze) == 1
+    assert len(booze[0]) == 1
+    assert booze[0][0] == "Victoria Bitter"
+
+
+def test_description(con):
+    cur = con.cursor()
+    executeDDL1(cur)
+    assert cur.description is None, (
+        "cursor.description should be none after executing a "
+        "statement that can return no rows (such as DDL)"
+    )
+    cur.execute("select name from %sbooze" % table_prefix)
+    assert len(cur.description) == 1, "cursor.description describes too many columns"
+    assert (
+        len(cur.description[0]) == 7
+    ), "cursor.description[x] tuples must have 7 elements"
+    assert (
+        cur.description[0][0].lower() == "name"
+    ), "cursor.description[x][0] must return column name"
+    assert cur.description[0][1] == driver.STRING, (
+        "cursor.description[x][1] must return column type. Got %r"
+        % cur.description[0][1]
+    )
+
+    # Make sure self.description gets reset
+    executeDDL2(cur)
+    assert cur.description is None, (
+        "cursor.description not being set to None when executing "
+        "no-result statements (eg. DDL)"
+    )
+
+
+def test_rowcount(cursor):
+    executeDDL1(cursor)
+    assert cursor.rowcount == -1, (
+        "cursor.rowcount should be -1 after executing no-result " "statements"
+    )
+    cursor.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    assert cursor.rowcount in (-1, 1), (
+        "cursor.rowcount should == number or rows inserted, or "
+        "set to -1 after executing an insert statement"
+    )
+    cursor.execute("select name from %sbooze" % table_prefix)
+    assert cursor.rowcount in (-1, 1), (
+        "cursor.rowcount should == number of rows returned, or "
+        "set to -1 after executing a select statement"
+    )
+    executeDDL2(cursor)
+    assert cursor.rowcount == -1, (
+        "cursor.rowcount not being reset to -1 after executing " "no-result statements"
+    )
+
+
+lower_func = "lower"
+
+
+def test_callproc(cursor):
+    if lower_func and hasattr(cursor, "callproc"):
+        r = cursor.callproc(lower_func, ("FOO",))
+        assert len(r) == 1
+        assert r[0] == "FOO"
+        r = cursor.fetchall()
+        assert len(r) == 1, "callproc produced no result set"
+        assert len(r[0]) == 1, "callproc produced invalid result set"
+        assert r[0][0] == "foo", "callproc produced invalid results"
+
+
+def test_close(con):
+    cur = con.cursor()
+    con.close()
+
+    # cursor.execute should raise an Error if called after connection
+    # closed
+    with pytest.raises(driver.Error):
+        executeDDL1(cur)
+
+    # connection.commit should raise an Error if called after connection'
+    # closed.'
+    with pytest.raises(driver.Error):
+        con.commit()
+
+    # connection.close should raise an Error if called more than once
+    with pytest.raises(driver.Error):
+        con.close()
+
+
+def test_execute(con):
+    cur = con.cursor()
+    _paraminsert(cur)
+
+
+def _paraminsert(cur):
+    executeDDL1(cur)
+    cur.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    assert cur.rowcount in (-1, 1)
+
+    if driver.paramstyle == "qmark":
+        cur.execute("insert into %sbooze values (?)" % table_prefix, ("Cooper's",))
+    elif driver.paramstyle == "numeric":
+        cur.execute("insert into %sbooze values (:1)" % table_prefix, ("Cooper's",))
+    elif driver.paramstyle == "named":
+        cur.execute(
+            "insert into %sbooze values (:beer)" % table_prefix, {"beer": "Cooper's"}
+        )
+    elif driver.paramstyle == "format":
+        cur.execute("insert into %sbooze values (%%s)" % table_prefix, ("Cooper's",))
+    elif driver.paramstyle == "pyformat":
+        cur.execute(
+            "insert into %sbooze values (%%(beer)s)" % table_prefix,
+            {"beer": "Cooper's"},
+        )
+    else:
+        assert False, "Invalid paramstyle"
+
+    assert cur.rowcount in (-1, 1)
+
+    cur.execute("select name from %sbooze" % table_prefix)
+    res = cur.fetchall()
+    assert len(res) == 2, "cursor.fetchall returned too few rows"
+    beers = [res[0][0], res[1][0]]
+    beers.sort()
+    assert beers[0] == "Cooper's", (
+        "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly"
+    )
+    assert beers[1] == "Victoria Bitter", (
+        "cursor.fetchall retrieved incorrect data, or data inserted " "incorrectly"
+    )
+
+
+def test_executemany(cursor):
+    executeDDL1(cursor)
+    largs = [("Cooper's",), ("Boag's",)]
+    margs = [{"beer": "Cooper's"}, {"beer": "Boag's"}]
+    if driver.paramstyle == "qmark":
+        cursor.executemany("insert into %sbooze values (?)" % table_prefix, largs)
+    elif driver.paramstyle == "numeric":
+        cursor.executemany("insert into %sbooze values (:1)" % table_prefix, largs)
+    elif driver.paramstyle == "named":
+        cursor.executemany("insert into %sbooze values (:beer)" % table_prefix, margs)
+    elif driver.paramstyle == "format":
+        cursor.executemany("insert into %sbooze values (%%s)" % table_prefix, largs)
+    elif driver.paramstyle == "pyformat":
+        cursor.executemany(
+            "insert into %sbooze values (%%(beer)s)" % (table_prefix), margs
+        )
+    else:
+        assert False, "Unknown paramstyle"
+
+    assert cursor.rowcount in (-1, 2), (
+        "insert using cursor.executemany set cursor.rowcount to "
+        "incorrect value %r" % cursor.rowcount
+    )
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    res = cursor.fetchall()
+    assert len(res) == 2, "cursor.fetchall retrieved incorrect number of rows"
+    beers = [res[0][0], res[1][0]]
+    beers.sort()
+    assert beers[0] == "Boag's", "incorrect data retrieved"
+    assert beers[1] == "Cooper's", "incorrect data retrieved"
+
+
+def test_fetchone(cursor):
+    # cursor.fetchone should raise an Error if called before
+    # executing a select-type query
+    with pytest.raises(driver.Error):
+        cursor.fetchone()
+
+    # cursor.fetchone should raise an Error if called after
+    # executing a query that cannnot return rows
+    executeDDL1(cursor)
+    with pytest.raises(driver.Error):
+        cursor.fetchone()
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    assert cursor.fetchone() is None, (
+        "cursor.fetchone should return None if a query retrieves " "no rows"
+    )
+    assert cursor.rowcount in (-1, 0)
+
+    # cursor.fetchone should raise an Error if called after
+    # executing a query that cannnot return rows
+    cursor.execute("insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
+    with pytest.raises(driver.Error):
+        cursor.fetchone()
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchone()
+    assert len(r) == 1, "cursor.fetchone should have retrieved a single row"
+    assert r[0] == "Victoria Bitter", "cursor.fetchone retrieved incorrect data"
+    assert (
+        cursor.fetchone() is None
+    ), "cursor.fetchone should return None if no more rows available"
+    assert cursor.rowcount in (-1, 1)
+
+
+samples = [
+    "Carlton Cold",
+    "Carlton Draft",
+    "Mountain Goat",
+    "Redback",
+    "Victoria Bitter",
+    "XXXX",
+]
+
+
+def _populate():
+    """Return a list of sql commands to setup the DB for the fetch
+    tests.
+    """
+    populate = [
+        "insert into %sbooze values ('%s')" % (table_prefix, s) for s in samples
+    ]
+    return populate
+
+
+def test_fetchmany(cursor):
+    # cursor.fetchmany should raise an Error if called without
+    # issuing a query
+    with pytest.raises(driver.Error):
+        cursor.fetchmany(4)
+
+    executeDDL1(cursor)
+    for sql in _populate():
+        cursor.execute(sql)
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchmany()
+    assert len(r) == 1, (
+        "cursor.fetchmany retrieved incorrect number of rows, "
+        "default of arraysize is one."
+    )
+    cursor.arraysize = 10
+    r = cursor.fetchmany(3)  # Should get 3 rows
+    assert len(r) == 3, "cursor.fetchmany retrieved incorrect number of rows"
+    r = cursor.fetchmany(4)  # Should get 2 more
+    assert len(r) == 2, "cursor.fetchmany retrieved incorrect number of rows"
+    r = cursor.fetchmany(4)  # Should be an empty sequence
+    assert len(r) == 0, (
+        "cursor.fetchmany should return an empty sequence after "
+        "results are exhausted"
+    )
+    assert cursor.rowcount in (-1, 6)
+
+    # Same as above, using cursor.arraysize
+    cursor.arraysize = 4
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchmany()  # Should get 4 rows
+    assert len(r) == 4, "cursor.arraysize not being honoured by fetchmany"
+    r = cursor.fetchmany()  # Should get 2 more
+    assert len(r) == 2
+    r = cursor.fetchmany()  # Should be an empty sequence
+    assert len(r) == 0
+    assert cursor.rowcount in (-1, 6)
+
+    cursor.arraysize = 6
+    cursor.execute("select name from %sbooze" % table_prefix)
+    rows = cursor.fetchmany()  # Should get all rows
+    assert cursor.rowcount in (-1, 6)
+    assert len(rows) == 6
+    assert len(rows) == 6
+    rows = [row[0] for row in rows]
+    rows.sort()
+
+    # Make sure we get the right data back out
+    for i in range(0, 6):
+        assert rows[i] == samples[i], "incorrect data retrieved by cursor.fetchmany"
+
+    rows = cursor.fetchmany()  # Should return an empty list
+    assert len(rows) == 0, (
+        "cursor.fetchmany should return an empty sequence if "
+        "called after the whole result set has been fetched"
+    )
+    assert cursor.rowcount in (-1, 6)
+
+    executeDDL2(cursor)
+    cursor.execute("select name from %sbarflys" % table_prefix)
+    r = cursor.fetchmany()  # Should get empty sequence
+    assert len(r) == 0, (
+        "cursor.fetchmany should return an empty sequence if " "query retrieved no rows"
+    )
+    assert cursor.rowcount in (-1, 0)
+
+
+def test_fetchall(cursor):
+    # cursor.fetchall should raise an Error if called
+    # without executing a query that may return rows (such
+    # as a select)
+    with pytest.raises(driver.Error):
+        cursor.fetchall()
+
+    executeDDL1(cursor)
+    for sql in _populate():
+        cursor.execute(sql)
+
+    # cursor.fetchall should raise an Error if called
+    # after executing a a statement that cannot return rows
+    with pytest.raises(driver.Error):
+        cursor.fetchall()
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    rows = cursor.fetchall()
+    assert cursor.rowcount in (-1, len(samples))
+    assert len(rows) == len(samples), "cursor.fetchall did not retrieve all rows"
+    rows = [r[0] for r in rows]
+    rows.sort()
+    for i in range(0, len(samples)):
+        assert rows[i] == samples[i], "cursor.fetchall retrieved incorrect rows"
+    rows = cursor.fetchall()
+    assert len(rows) == 0, (
+        "cursor.fetchall should return an empty list if called "
+        "after the whole result set has been fetched"
+    )
+    assert cursor.rowcount in (-1, len(samples))
+
+    executeDDL2(cursor)
+    cursor.execute("select name from %sbarflys" % table_prefix)
+    rows = cursor.fetchall()
+    assert cursor.rowcount in (-1, 0)
+    assert len(rows) == 0, (
+        "cursor.fetchall should return an empty list if "
+        "a select query returns no rows"
+    )
+
+
+def test_mixedfetch(cursor):
+    executeDDL1(cursor)
+    for sql in _populate():
+        cursor.execute(sql)
+
+    cursor.execute("select name from %sbooze" % table_prefix)
+    rows1 = cursor.fetchone()
+    rows23 = cursor.fetchmany(2)
+    rows4 = cursor.fetchone()
+    rows56 = cursor.fetchall()
+    assert cursor.rowcount in (-1, 6)
+    assert len(rows23) == 2, "fetchmany returned incorrect number of rows"
+    assert len(rows56) == 2, "fetchall returned incorrect number of rows"
+
+    rows = [rows1[0]]
+    rows.extend([rows23[0][0], rows23[1][0]])
+    rows.append(rows4[0])
+    rows.extend([rows56[0][0], rows56[1][0]])
+    rows.sort()
+    for i in range(0, len(samples)):
+        assert rows[i] == samples[i], "incorrect data retrieved or inserted"
+
+
+def help_nextset_setUp(cur):
+    """Should create a procedure called deleteme
+    that returns two result sets, first the
+    number of rows in booze then "name from booze"
+    """
+    raise NotImplementedError("Helper not implemented")
+
+
+def help_nextset_tearDown(cur):
+    "If cleaning up is needed after nextSetTest"
+    raise NotImplementedError("Helper not implemented")
+
+
+def test_nextset(cursor):
+    if not hasattr(cursor, "nextset"):
+        return
+
+    try:
+        executeDDL1(cursor)
+        sql = _populate()
+        for sql in _populate():
+            cursor.execute(sql)
+
+        help_nextset_setUp(cursor)
+
+        cursor.callproc("deleteme")
+        numberofrows = cursor.fetchone()
+        assert numberofrows[0] == len(samples)
+        assert cursor.nextset()
+        names = cursor.fetchall()
+        assert len(names) == len(samples)
+        s = cursor.nextset()
+        assert s is None, "No more return sets, should return None"
+    finally:
+        help_nextset_tearDown(cursor)
+
+
+def test_arraysize(cursor):
+    # Not much here - rest of the tests for this are in test_fetchmany
+    assert hasattr(cursor, "arraysize"), "cursor.arraysize must be defined"
+
+
+def test_setinputsizes(cursor):
+    cursor.setinputsizes(25)
+
+
+def test_setoutputsize_basic(cursor):
+    # Basic test is to make sure setoutputsize doesn't blow up
+    cursor.setoutputsize(1000)
+    cursor.setoutputsize(2000, 0)
+    _paraminsert(cursor)  # Make sure the cursor still works
+
+
+def test_None(cursor):
+    executeDDL1(cursor)
+    cursor.execute("insert into %sbooze values (NULL)" % table_prefix)
+    cursor.execute("select name from %sbooze" % table_prefix)
+    r = cursor.fetchall()
+    assert len(r) == 1
+    assert len(r[0]) == 1
+    assert r[0][0] is None, "NULL value not returned as None"
+
+
+def test_Date():
+    driver.Date(2002, 12, 25)
+    driver.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0)))
+    # Can we assume this? API doesn't specify, but it seems implied
+    # self.assertEqual(str(d1),str(d2))
+
+
+def test_Time():
+    driver.Time(13, 45, 30)
+    driver.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0)))
+    # Can we assume this? API doesn't specify, but it seems implied
+    # self.assertEqual(str(t1),str(t2))
+
+
+def test_Timestamp():
+    driver.Timestamp(2002, 12, 25, 13, 45, 30)
+    driver.TimestampFromTicks(time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0)))
+    # Can we assume this? API doesn't specify, but it seems implied
+    # self.assertEqual(str(t1),str(t2))
+
+
+def test_Binary():
+    driver.Binary(b"Something")
+    driver.Binary(b"")
+
+
+def test_STRING():
+    assert hasattr(driver, "STRING"), "module.STRING must be defined"
+
+
+def test_BINARY():
+    assert hasattr(driver, "BINARY"), "module.BINARY must be defined."
+
+
+def test_NUMBER():
+    assert hasattr(driver, "NUMBER"), "module.NUMBER must be defined."
+
+
+def test_DATETIME():
+    assert hasattr(driver, "DATETIME"), "module.DATETIME must be defined."
+
+
+def test_ROWID():
+    assert hasattr(driver, "ROWID"), "module.ROWID must be defined."
diff -pruN 1.10.6-3/test/legacy/test_dbapi.py 1.23.0-0ubuntu1/test/legacy/test_dbapi.py
--- 1.10.6-3/test/legacy/test_dbapi.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_dbapi.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,225 @@
+import datetime
+import os
+import time
+
+import pytest
+
+import pg8000
+
+
+@pytest.fixture
+def has_tzset():
+
+    # Neither Windows nor Jython 2.5.3 has a time.tzset() so skip
+    if hasattr(time, "tzset"):
+        os.environ["TZ"] = "UTC"
+        time.tzset()
+        return True
+    return False
+
+
+# DBAPI compatible interface tests
+@pytest.fixture
+def db_table(con, has_tzset):
+    with con.cursor() as c:
+        c.execute(
+            "CREATE TEMPORARY TABLE t1 "
+            "(f1 int primary key, f2 int not null, f3 varchar(50) null) "
+            "ON COMMIT DROP"
+        )
+        c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
+        c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
+        c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None))
+        c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None))
+        c.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None))
+    return con
+
+
+def test_parallel_queries(db_table):
+    with db_table.cursor() as c1, db_table.cursor() as c2:
+
+        c1.execute("SELECT f1, f2, f3 FROM t1")
+        while 1:
+            row = c1.fetchone()
+            if row is None:
+                break
+            f1, f2, f3 = row
+            c2.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
+            while 1:
+                row = c2.fetchone()
+                if row is None:
+                    break
+                f1, f2, f3 = row
+
+
+def test_qmark(db_table):
+    orig_paramstyle = pg8000.paramstyle
+    try:
+        pg8000.paramstyle = "qmark"
+        with db_table.cursor() as c1:
+            c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > ?", (3,))
+            while 1:
+                row = c1.fetchone()
+                if row is None:
+                    break
+                f1, f2, f3 = row
+    finally:
+        pg8000.paramstyle = orig_paramstyle
+
+
+def test_numeric(db_table):
+    orig_paramstyle = pg8000.paramstyle
+    try:
+        pg8000.paramstyle = "numeric"
+        with db_table.cursor() as c1:
+            c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > :1", (3,))
+            while 1:
+                row = c1.fetchone()
+                if row is None:
+                    break
+                f1, f2, f3 = row
+    finally:
+        pg8000.paramstyle = orig_paramstyle
+
+
+def test_named(db_table):
+    orig_paramstyle = pg8000.paramstyle
+    try:
+        pg8000.paramstyle = "named"
+        with db_table.cursor() as c1:
+            c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > :f1", {"f1": 3})
+            while 1:
+                row = c1.fetchone()
+                if row is None:
+                    break
+                f1, f2, f3 = row
+    finally:
+        pg8000.paramstyle = orig_paramstyle
+
+
+def test_format(db_table):
+    orig_paramstyle = pg8000.paramstyle
+    try:
+        pg8000.paramstyle = "format"
+        with db_table.cursor() as c1:
+            c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (3,))
+            while 1:
+                row = c1.fetchone()
+                if row is None:
+                    break
+                f1, f2, f3 = row
+    finally:
+        pg8000.paramstyle = orig_paramstyle
+
+
+def test_pyformat(db_table):
+    orig_paramstyle = pg8000.paramstyle
+    try:
+        pg8000.paramstyle = "pyformat"
+        with db_table.cursor() as c1:
+            c1.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %(f1)s", {"f1": 3})
+            while 1:
+                row = c1.fetchone()
+                if row is None:
+                    break
+                f1, f2, f3 = row
+    finally:
+        pg8000.paramstyle = orig_paramstyle
+
+
+def test_arraysize(db_table):
+    with db_table.cursor() as c1:
+        c1.arraysize = 3
+        c1.execute("SELECT * FROM t1")
+        retval = c1.fetchmany()
+        assert len(retval) == c1.arraysize
+
+
+def test_date():
+    val = pg8000.Date(2001, 2, 3)
+    assert val == datetime.date(2001, 2, 3)
+
+
+def test_time():
+    val = pg8000.Time(4, 5, 6)
+    assert val == datetime.time(4, 5, 6)
+
+
+def test_timestamp():
+    val = pg8000.Timestamp(2001, 2, 3, 4, 5, 6)
+    assert val == datetime.datetime(2001, 2, 3, 4, 5, 6)
+
+
+def test_date_from_ticks(has_tzset):
+    if has_tzset:
+        val = pg8000.DateFromTicks(1173804319)
+        assert val == datetime.date(2007, 3, 13)
+
+
+def testTimeFromTicks(has_tzset):
+    if has_tzset:
+        val = pg8000.TimeFromTicks(1173804319)
+        assert val == datetime.time(16, 45, 19)
+
+
+def test_timestamp_from_ticks(has_tzset):
+    if has_tzset:
+        val = pg8000.TimestampFromTicks(1173804319)
+        assert val == datetime.datetime(2007, 3, 13, 16, 45, 19)
+
+
+def test_binary():
+    v = pg8000.Binary(b"\x00\x01\x02\x03\x02\x01\x00")
+    assert v == b"\x00\x01\x02\x03\x02\x01\x00"
+    assert isinstance(v, pg8000.BINARY)
+
+
+def test_row_count(db_table):
+    with db_table.cursor() as c1:
+        c1.execute("SELECT * FROM t1")
+
+        assert 5 == c1.rowcount
+
+        c1.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
+        assert 2 == c1.rowcount
+
+        c1.execute("DELETE FROM t1")
+        assert 5 == c1.rowcount
+
+
+def test_fetch_many(db_table):
+    with db_table.cursor() as cursor:
+        cursor.arraysize = 2
+        cursor.execute("SELECT * FROM t1")
+        assert 2 == len(cursor.fetchmany())
+        assert 2 == len(cursor.fetchmany())
+        assert 1 == len(cursor.fetchmany())
+        assert 0 == len(cursor.fetchmany())
+
+
+def test_iterator(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("SELECT * FROM t1 ORDER BY f1")
+        f1 = 0
+        for row in cursor:
+            next_f1 = row[0]
+            assert next_f1 > f1
+            f1 = next_f1
+
+
+# Vacuum can't be run inside a transaction, so we need to turn
+# autocommit on.
+def test_vacuum(con):
+    con.autocommit = True
+    with con.cursor() as cursor:
+        cursor.execute("vacuum")
+
+
+def test_prepared_statement(con):
+    with con.cursor() as cursor:
+        cursor.execute("PREPARE gen_series AS SELECT generate_series(1, 10);")
+        cursor.execute("EXECUTE gen_series")
+
+
+def test_cursor_type(cursor):
+    assert str(type(cursor)) == "<class 'pg8000.legacy.Cursor'>"
diff -pruN 1.10.6-3/test/legacy/test_error_recovery.py 1.23.0-0ubuntu1/test/legacy/test_error_recovery.py
--- 1.10.6-3/test/legacy/test_error_recovery.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_error_recovery.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,49 @@
+import datetime
+import warnings
+
+import pytest
+
+import pg8000
+
+
+class PG8000TestException(Exception):
+    pass
+
+
+def raise_exception(val):
+    raise PG8000TestException("oh noes!")
+
+
+def test_py_value_fail(con, mocker):
+    # Ensure that if types.py_value throws an exception, the original
+    # exception is raised (PG8000TestException), and the connection is
+    # still usable after the error.
+    mocker.patch.object(con, "py_types")
+    con.py_types = {datetime.time: raise_exception}
+
+    with con.cursor() as c, pytest.raises(PG8000TestException):
+        c.execute("SELECT CAST(%s AS TIME)", (datetime.time(10, 30),))
+        c.fetchall()
+
+        # ensure that the connection is still usable for a new query
+        c.execute("VALUES ('hw3'::text)")
+        assert c.fetchone()[0] == "hw3"
+
+
+def test_no_data_error_recovery(con):
+    for i in range(1, 4):
+        with con.cursor() as c, pytest.raises(pg8000.ProgrammingError) as e:
+            c.execute("DROP TABLE t1")
+        assert e.value.args[0]["C"] == "42P01"
+        con.rollback()
+
+
+def testClosedConnection(db_kwargs):
+    warnings.simplefilter("ignore")
+    my_db = pg8000.connect(**db_kwargs)
+    cursor = my_db.cursor()
+    my_db.close()
+    with pytest.raises(my_db.InterfaceError, match="connection is closed"):
+        cursor.execute("VALUES ('hw1'::text)")
+
+    warnings.resetwarnings()
diff -pruN 1.10.6-3/test/legacy/test_paramstyle.py 1.23.0-0ubuntu1/test/legacy/test_paramstyle.py
--- 1.10.6-3/test/legacy/test_paramstyle.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_paramstyle.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,107 @@
+from pg8000.legacy import convert_paramstyle as convert
+
+
+# Tests of the convert_paramstyle function.
+
+
+def test_qmark():
+    args = 1, 2, 3
+    new_query, vals = convert(
+        "qmark",
+        'SELECT ?, ?, "field_?" FROM t '
+        "WHERE a='say ''what?''' AND b=? AND c=E'?\\'test\\'?'",
+        args,
+    )
+    expected = (
+        'SELECT $1, $2, "field_?" FROM t WHERE '
+        "a='say ''what?''' AND b=$3 AND c=E'?\\'test\\'?'"
+    )
+    assert (new_query, vals) == (expected, args)
+
+
+def test_qmark_2():
+    args = 1, 2, 3
+    new_query, vals = convert(
+        "qmark", "SELECT ?, ?, * FROM t WHERE a=? AND b='are you ''sure?'", args
+    )
+    expected = "SELECT $1, $2, * FROM t WHERE a=$3 AND b='are you ''sure?'"
+    assert (new_query, vals) == (expected, args)
+
+
+def test_numeric():
+    args = 1, 2, 3
+    new_query, vals = convert(
+        "numeric", "SELECT sum(x)::decimal(5, 2) :2, :1, * FROM t WHERE a=:3", args
+    )
+    expected = "SELECT sum(x)::decimal(5, 2) $2, $1, * FROM t WHERE a=$3"
+    assert (new_query, vals) == (expected, args)
+
+
+def test_numeric_default_parameter():
+    args = 1, 2, 3
+    new_query, vals = convert("numeric", "make_interval(days := 10)", args)
+
+    assert (new_query, vals) == ("make_interval(days := 10)", args)
+
+
+def test_named():
+    args = {
+        "f_2": 1,
+        "f1": 2,
+    }
+    new_query, vals = convert(
+        "named", "SELECT sum(x)::decimal(5, 2) :f_2, :f1 FROM t WHERE a=:f_2", args
+    )
+    expected = "SELECT sum(x)::decimal(5, 2) $1, $2 FROM t WHERE a=$1"
+    assert (new_query, vals) == (expected, (1, 2))
+
+
+def test_format():
+    args = 1, 2, 3
+    new_query, vals = convert(
+        "format",
+        "SELECT %s, %s, \"f1_%%\", E'txt_%%' "
+        "FROM t WHERE a=%s AND b='75%%' AND c = '%' -- Comment with %",
+        args,
+    )
+    expected = (
+        "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$3 AND "
+        "b='75%%' AND c = '%' -- Comment with %"
+    )
+    assert (new_query, vals) == (expected, args)
+
+    sql = (
+        r"""COMMENT ON TABLE test_schema.comment_test """
+        r"""IS 'the test % '' " \ table comment'"""
+    )
+    new_query, vals = convert("format", sql, args)
+    assert (new_query, vals) == (sql, args)
+
+
+def test_format_multiline():
+    args = 1, 2, 3
+    new_query, vals = convert("format", "SELECT -- Comment\n%s FROM t", args)
+    assert (new_query, vals) == ("SELECT -- Comment\n$1 FROM t", args)
+
+
+def test_py_format():
+    args = {"f2": 1, "f1": 2, "f3": 3}
+
+    new_query, vals = convert(
+        "pyformat",
+        "SELECT %(f2)s, %(f1)s, \"f1_%%\", E'txt_%%' "
+        "FROM t WHERE a=%(f2)s AND b='75%%'",
+        args,
+    )
+    expected = "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$1 AND " "b='75%%'"
+    assert (new_query, vals) == (expected, (1, 2))
+
+    # pyformat should support %s and an array, too:
+    args = 1, 2, 3
+    new_query, vals = convert(
+        "pyformat",
+        "SELECT %s, %s, \"f1_%%\", E'txt_%%' " "FROM t WHERE a=%s AND b='75%%'",
+        args,
+    )
+    expected = "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$3 AND " "b='75%%'"
+    assert (new_query, vals) == (expected, args)
diff -pruN 1.10.6-3/test/legacy/test_prepared_statement.py 1.23.0-0ubuntu1/test/legacy/test_prepared_statement.py
--- 1.10.6-3/test/legacy/test_prepared_statement.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_prepared_statement.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,12 @@
+def test_prepare(con):
+    con.prepare("SELECT CAST(:v AS INTEGER)")
+
+
+def test_run(con):
+    ps = con.prepare("SELECT cast(:v as varchar)")
+    ps.run(v="speedy")
+
+
+def test_run_with_no_results(con):
+    ps = con.prepare("ROLLBACK")
+    ps.run()
diff -pruN 1.10.6-3/test/legacy/test_query.py 1.23.0-0ubuntu1/test/legacy/test_query.py
--- 1.10.6-3/test/legacy/test_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_query.py	2021-11-12 20:32:36.000000000 +0000
@@ -0,0 +1,341 @@
+from datetime import datetime as Datetime, timezone as Timezone
+from warnings import filterwarnings
+
+import pytest
+
+import pg8000
+from pg8000.converters import INET_ARRAY, INTEGER
+
+
+# Tests relating to the basic operation of the database driver, driven by the
+# pg8000 custom interface.
+
+
+@pytest.fixture
+def db_table(request, con):
+    filterwarnings("ignore", "DB-API extension cursor.next()")
+    filterwarnings("ignore", "DB-API extension cursor.__iter__()")
+    con.paramstyle = "format"
+    with con.cursor() as cursor:
+        cursor.execute(
+            "CREATE TEMPORARY TABLE t1 (f1 int primary key, "
+            "f2 bigint not null, f3 varchar(50) null) "
+        )
+
+    def fin():
+        try:
+            with con.cursor() as cursor:
+                cursor.execute("drop table t1")
+        except pg8000.ProgrammingError:
+            pass
+
+    request.addfinalizer(fin)
+    return con
+
+
+def test_database_error(cursor):
+    with pytest.raises(pg8000.ProgrammingError):
+        cursor.execute("INSERT INTO t99 VALUES (1, 2, 3)")
+
+
+def test_parallel_queries(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None)
+        )
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None)
+        )
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None)
+        )
+        with db_table.cursor() as c1, db_table.cursor() as c2:
+            c1.execute("SELECT f1, f2, f3 FROM t1")
+            for row in c1:
+                f1, f2, f3 = row
+                c2.execute("SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
+                for row in c2:
+                    f1, f2, f3 = row
+
+
+def test_parallel_open_portals(con):
+    with con.cursor() as c1, con.cursor() as c2:
+        c1count, c2count = 0, 0
+        q = "select * from generate_series(1, %s)"
+        params = (100,)
+        c1.execute(q, params)
+        c2.execute(q, params)
+        for c2row in c2:
+            c2count += 1
+        for c1row in c1:
+            c1count += 1
+
+    assert c1count == c2count
+
+
+# Run a query on a table, alter the structure of the table, then run the
+# original query again.
+
+
+def test_alter(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("select * from t1")
+        cursor.execute("alter table t1 drop column f3")
+        cursor.execute("select * from t1")
+
+
+# Run a query on a table, drop then re-create the table, then run the
+# original query again.
+
+
+def test_create(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("select * from t1")
+        cursor.execute("drop table t1")
+        cursor.execute("create temporary table t1 (f1 int primary key)")
+        cursor.execute("select * from t1")
+
+
+def test_insert_returning(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("CREATE TABLE t2 (id serial, data text)")
+
+        # Test INSERT ... RETURNING with one row...
+        cursor.execute("INSERT INTO t2 (data) VALUES (%s) RETURNING id", ("test1",))
+        row_id = cursor.fetchone()[0]
+        cursor.execute("SELECT data FROM t2 WHERE id = %s", (row_id,))
+        assert "test1" == cursor.fetchone()[0]
+
+        assert cursor.rowcount == 1
+
+        # Test with multiple rows...
+        cursor.execute(
+            "INSERT INTO t2 (data) VALUES (%s), (%s), (%s) " "RETURNING id",
+            ("test2", "test3", "test4"),
+        )
+        assert cursor.rowcount == 3
+        ids = tuple([x[0] for x in cursor])
+        assert len(ids) == 3
+
+
+def test_row_count(db_table):
+    with db_table.cursor() as cursor:
+        expected_count = 57
+        cursor.executemany(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
+            tuple((i, i, None) for i in range(expected_count)),
+        )
+
+        # Check rowcount after executemany
+        assert expected_count == cursor.rowcount
+
+        cursor.execute("SELECT * FROM t1")
+
+        # Check row_count without doing any reading first...
+        assert expected_count == cursor.rowcount
+
+        # Check rowcount after reading some rows, make sure it still
+        # works...
+        for i in range(expected_count // 2):
+            cursor.fetchone()
+        assert expected_count == cursor.rowcount
+
+    with db_table.cursor() as cursor:
+        # Restart the cursor, read a few rows, and then check rowcount
+        # again...
+        cursor.execute("SELECT * FROM t1")
+        for i in range(expected_count // 3):
+            cursor.fetchone()
+        assert expected_count == cursor.rowcount
+
+        # Should be -1 for a command with no results
+        cursor.execute("DROP TABLE t1")
+        assert -1 == cursor.rowcount
+
+
+def test_row_count_update(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, None))
+        cursor.execute("INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (2, 10, None))
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (3, 100, None)
+        )
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (4, 1000, None)
+        )
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (5, 10000, None)
+        )
+        cursor.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
+        assert cursor.rowcount == 2
+
+
+def test_int_oid(cursor):
+    # https://bugs.launchpad.net/pg8000/+bug/230796
+    cursor.execute("SELECT typname FROM pg_type WHERE oid = %s", (100,))
+
+
+def test_unicode_query(cursor):
+    cursor.execute(
+        "CREATE TEMPORARY TABLE \u043c\u0435\u0441\u0442\u043e "
+        "(\u0438\u043c\u044f VARCHAR(50), "
+        "\u0430\u0434\u0440\u0435\u0441 VARCHAR(250))"
+    )
+
+
+def test_executemany(db_table):
+    with db_table.cursor() as cursor:
+        cursor.executemany(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
+            ((1, 1, "Avast ye!"), (2, 1, None)),
+        )
+
+        cursor.executemany(
+            "SELECT CAST(%s AS TIMESTAMP)",
+            ((Datetime(2014, 5, 7, tzinfo=Timezone.utc),), (Datetime(2014, 5, 7),)),
+        )
+
+
+def test_executemany_setinputsizes(cursor):
+    """Make sure that setinputsizes works for all the parameter sets"""
+
+    cursor.execute(
+        "CREATE TEMPORARY TABLE t1 (f1 int primary key, f2 inet[] not null) "
+    )
+
+    cursor.setinputsizes(INTEGER, INET_ARRAY)
+    cursor.executemany(
+        "INSERT INTO t1 (f1, f2) VALUES (%s, %s)", ((1, ["1.1.1.1"]), (2, ["0.0.0.0"]))
+    )
+
+
+def test_executemany_no_param_sets(cursor):
+    cursor.executemany("INSERT INTO t1 (f1, f2) VALUES (%s, %s)", [])
+    assert cursor.rowcount == -1
+
+
+# Check that autocommit stays off
+# We keep track of whether we're in a transaction or not by using the
+# READY_FOR_QUERY message.
+def test_transactions(db_table):
+    with db_table.cursor() as cursor:
+        cursor.execute("commit")
+        cursor.execute(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)", (1, 1, "Zombie")
+        )
+        cursor.execute("rollback")
+        cursor.execute("select * from t1")
+
+        assert cursor.rowcount == 0
+
+
+def test_in(cursor):
+    cursor.execute("SELECT typname FROM pg_type WHERE oid = any(%s)", ([16, 23],))
+    ret = cursor.fetchall()
+    assert ret[0][0] == "bool"
+
+
+def test_no_previous_tpc(con):
+    con.tpc_begin("Stacey")
+    with con.cursor() as cursor:
+        cursor.execute("SELECT * FROM pg_type")
+        con.tpc_commit()
+
+
+# Check that tpc_recover() doesn't start a transaction
+def test_tpc_recover(con):
+    con.tpc_recover()
+    with con.cursor() as cursor:
+        con.autocommit = True
+
+        # If tpc_recover() has started a transaction, this will fail
+        cursor.execute("VACUUM")
+
+
+def test_tpc_prepare(con):
+    xid = "Stacey"
+    con.tpc_begin(xid)
+    con.tpc_prepare()
+    con.tpc_rollback(xid)
+
+
+# An empty query should raise a ProgrammingError
+def test_empty_query(cursor):
+    with pytest.raises(pg8000.ProgrammingError):
+        cursor.execute("")
+
+
+# rolling back when not in a transaction doesn't generate a warning
+def test_rollback_no_transaction(con):
+    # Remove any existing notices
+    con.notices.clear()
+
+    # First, verify that a raw rollback does produce a notice
+    con.execute_unnamed("rollback")
+
+    assert 1 == len(con.notices)
+
+    # 25P01 is the code for no_active_sql_transaction. It has
+    # a message and severity name, but those might be
+    # localized/depend on the server version.
+    assert con.notices.pop().get(b"C") == b"25P01"
+
+    # Now going through the rollback method doesn't produce
+    # any notices because it knows we're not in a transaction.
+    con.rollback()
+
+    assert 0 == len(con.notices)
+
+
+def test_context_manager_class(con):
+    assert "__enter__" in pg8000.legacy.Cursor.__dict__
+    assert "__exit__" in pg8000.legacy.Cursor.__dict__
+
+    with con.cursor() as cursor:
+        cursor.execute("select 1")
+
+
+def test_close_prepared_statement(con):
+    ps = con.prepare("select 1")
+    ps.run()
+    res = con.run("select count(*) from pg_prepared_statements")
+    assert res[0][0] == 1  # Should have one prepared statement
+
+    ps.close()
+
+    res = con.run("select count(*) from pg_prepared_statements")
+    assert res[0][0] == 0  # Should have no prepared statements
+
+
+def test_setinputsizes(con):
+    cursor = con.cursor()
+    cursor.setinputsizes(20)
+    cursor.execute("select %s", (None,))
+    retval = cursor.fetchall()
+    assert retval[0][0] is None
+
+
+def test_setinputsizes_class(con):
+    cursor = con.cursor()
+    cursor.setinputsizes(bytes)
+    cursor.execute("select %s", (None,))
+    retval = cursor.fetchall()
+    assert retval[0][0] is None
+
+
+def test_unexecuted_cursor_rowcount(con):
+    cursor = con.cursor()
+    assert cursor.rowcount == -1
+
+
+def test_unexecuted_cursor_description(con):
+    cursor = con.cursor()
+    assert cursor.description is None
+
+
+def test_not_parsed_if_no_params(mocker, cursor):
+    mock_convert_paramstyle = mocker.patch("pg8000.legacy.convert_paramstyle")
+    cursor.execute("ROLLBACK")
+    mock_convert_paramstyle.assert_not_called()
diff -pruN 1.10.6-3/test/legacy/test_typeconversion.py 1.23.0-0ubuntu1/test/legacy/test_typeconversion.py
--- 1.10.6-3/test/legacy/test_typeconversion.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_typeconversion.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,784 @@
+import decimal
+import ipaddress
+import os
+import time
+import uuid
+from collections import OrderedDict
+from datetime import (
+    date as Date,
+    datetime as Datetime,
+    time as Time,
+    timedelta as Timedelta,
+    timezone as Timezone,
+)
+from enum import Enum
+from json import dumps
+
+import pytest
+
+import pytz
+
+import pg8000.converters
+from pg8000 import (
+    Binary,
+    INTERVAL,
+    PGInterval,
+    ProgrammingError,
+    pginterval_in,
+    pginterval_out,
+    timedelta_in,
+)
+
+
+# Type conversion tests
+
+
+def test_time_roundtrip(con):
+    retval = con.run("SELECT cast(:t as time) as f1", t=Time(4, 5, 6))
+    assert retval[0][0] == Time(4, 5, 6)
+
+
+def test_date_roundtrip(con):
+    v = Date(2001, 2, 3)
+    retval = con.run("SELECT cast(:d as date) as f1", d=v)
+    assert retval[0][0] == v
+
+
+def test_bool_roundtrip(con):
+    retval = con.run("SELECT cast(:b as bool) as f1", b=True)
+    assert retval[0][0] is True
+
+
+def test_null_roundtrip(con):
+    retval = con.run("select current_setting('server_version')")
+    version = retval[0][0][:2]
+
+    if version.startswith("9"):
+        # Prior to PostgreSQL version 10 We can't just "SELECT %s" and set
+        # None as the parameter, since it has no type.  That would result
+        # in a PG error, "could not determine data type of parameter %s".
+        # So we create a temporary table, insert null values, and read them
+        # back.
+        con.run(
+            "CREATE TEMPORARY TABLE TestNullWrite "
+            "(f1 int4, f2 timestamp, f3 varchar)"
+        )
+        con.run(
+            "INSERT INTO TestNullWrite VALUES (:v1, :v2, :v3)",
+            v1=None,
+            v2=None,
+            v3=None,
+        )
+        retval = con.run("SELECT * FROM TestNullWrite")
+        assert retval[0] == [None, None, None]
+
+        with pytest.raises(ProgrammingError):
+            con.run("SELECT :v as f1", v=None)
+    else:
+        retval = con.run("SELECT :v", v=None)
+        assert retval[0][0] is None
+
+
+def test_decimal_roundtrip(cursor):
+    values = ("1.1", "-1.1", "10000", "20000", "-1000000000.123456789", "1.0", "12.44")
+    for v in values:
+        cursor.execute("SELECT CAST(%s AS NUMERIC)", (decimal.Decimal(v),))
+        retval = cursor.fetchall()
+        assert str(retval[0][0]) == v
+
+
+def test_float_roundtrip(con):
+    val = 1.756e-12
+    retval = con.run("SELECT cast(:v as double precision)", v=val)
+    assert retval[0][0] == val
+
+
+def test_float_plus_infinity_roundtrip(con):
+    v = float("inf")
+    retval = con.run("SELECT cast(:v as double precision)", v=v)
+    assert retval[0][0] == v
+
+
+def test_str_roundtrip(cursor):
+    v = "hello world"
+    cursor.execute("create temporary table test_str (f character varying(255))")
+    cursor.execute("INSERT INTO test_str VALUES (%s)", (v,))
+    retval = tuple(cursor.execute("SELECT * from test_str"))
+    assert retval[0][0] == v
+
+
+def test_str_then_int(cursor):
+    v1 = "hello world"
+    retval = tuple(cursor.execute("SELECT cast(%s as varchar) as f1", (v1,)))
+    assert retval[0][0] == v1
+
+    v2 = 1
+    retval = tuple(cursor.execute("SELECT cast(%s as varchar) as f1", (v2,)))
+    assert retval[0][0] == str(v2)
+
+
+def test_unicode_roundtrip(cursor):
+    v = "hello \u0173 world"
+    retval = tuple(cursor.execute("SELECT cast(%s as varchar) as f1", (v,)))
+    assert retval[0][0] == v
+
+
+def test_long_roundtrip(con):
+    v = 50000000000000
+    retval = con.run("SELECT cast(:v as bigint)", v=v)
+    assert retval[0][0] == v
+
+
+def test_int_execute_many_select(cursor):
+    tuple(cursor.executemany("SELECT CAST(%s AS INTEGER)", ((1,), (40000,))))
+
+
+def test_int_execute_many_insert(cursor):
+    v = ([None], [4])
+    cursor.execute("create temporary table test_int (f integer)")
+    cursor.executemany("INSERT INTO test_int VALUES (%s)", v)
+    retval = tuple(cursor.execute("SELECT * from test_int"))
+    assert retval == v
+
+
+def test_insert_null(con):
+    v = None
+    con.run("CREATE TEMPORARY TABLE test_int (f INTEGER)")
+    con.run("INSERT INTO test_int VALUES (:v)", v=v)
+    retval = con.run("SELECT * FROM test_int")
+    assert retval[0][0] == v
+
+
+def test_int_roundtrip(con):
+    int2 = 21
+    int4 = 23
+    int8 = 20
+
+    MAP = {
+        int2: "int2",
+        int4: "int4",
+        int8: "int8",
+    }
+
+    test_values = [
+        (0, int2),
+        (-32767, int2),
+        (-32768, int4),
+        (+32767, int2),
+        (+32768, int4),
+        (-2147483647, int4),
+        (-2147483648, int8),
+        (+2147483647, int4),
+        (+2147483648, int8),
+        (-9223372036854775807, int8),
+        (+9223372036854775807, int8),
+    ]
+
+    for value, typoid in test_values:
+        retval = con.run("SELECT cast(:v as " + MAP[typoid] + ")", v=value)
+        assert retval[0][0] == value
+        column_name, column_typeoid = con.description[0][0:2]
+        assert column_typeoid == typoid
+
+
+def test_bytea_roundtrip(con):
+    retval = con.run(
+        "SELECT cast(:v as bytea)", v=Binary(b"\x00\x01\x02\x03\x02\x01\x00")
+    )
+    assert retval[0][0] == b"\x00\x01\x02\x03\x02\x01\x00"
+
+
+def test_bytearray_round_trip(con):
+    binary = b"\x00\x01\x02\x03\x02\x01\x00"
+    retval = con.run("SELECT cast(:v as bytea)", v=bytearray(binary))
+    assert retval[0][0] == binary
+
+
+def test_bytearray_subclass_round_trip(con):
+    class BClass(bytearray):
+        pass
+
+    binary = b"\x00\x01\x02\x03\x02\x01\x00"
+    retval = con.run("SELECT cast(:v as bytea)", v=BClass(binary))
+    assert retval[0][0] == binary
+
+
+def test_timestamp_roundtrip(is_java, con):
+    v = Datetime(2001, 2, 3, 4, 5, 6, 170000)
+    retval = con.run("SELECT cast(:v as timestamp)", v=v)
+    assert retval[0][0] == v
+
+    # Test that time zone doesn't affect it
+    # Jython 2.5.3 doesn't have a time.tzset() so skip
+    if not is_java:
+        orig_tz = os.environ.get("TZ")
+        os.environ["TZ"] = "America/Edmonton"
+        time.tzset()
+
+        retval = con.run("SELECT cast(:v as timestamp)", v=v)
+        assert retval[0][0] == v
+
+        if orig_tz is None:
+            del os.environ["TZ"]
+        else:
+            os.environ["TZ"] = orig_tz
+        time.tzset()
+
+
+def test_interval_repr():
+    v = PGInterval(microseconds=123456789, days=2, months=24)
+    assert repr(v) == "<PGInterval 24 months 2 days 123456789 microseconds>"
+
+
+def test_interval_in_1_year():
+    assert pginterval_in("1 year") == PGInterval(years=1)
+
+
+def test_timedelta_in_2_months():
+    assert timedelta_in("2 hours")
+
+
+def test_interval_roundtrip(con):
+    con.register_in_adapter(INTERVAL, pginterval_in)
+    con.register_out_adapter(PGInterval, pginterval_out)
+    v = PGInterval(microseconds=123456789, days=2, months=24)
+    retval = con.run("SELECT cast(:v as interval)", v=v)
+    assert retval[0][0] == v
+
+
+def test_timedelta_roundtrip(con):
+    v = Timedelta(seconds=30)
+    retval = con.run("SELECT cast(:v as interval)", v=v)
+    assert retval[0][0] == v
+
+
+def test_enum_str_round_trip(cursor):
+    try:
+        cursor.execute("create type lepton as enum ('electron', 'muon', 'tau')")
+
+        v = "muon"
+        cursor.execute("SELECT cast(%s as lepton) as f1", (v,))
+        retval = cursor.fetchall()
+        assert retval[0][0] == v
+        cursor.execute("CREATE TEMPORARY TABLE testenum (f1 lepton)")
+        cursor.execute(
+            "INSERT INTO testenum VALUES (cast(%s as lepton))", ("electron",)
+        )
+    finally:
+        cursor.execute("drop table testenum")
+        cursor.execute("drop type lepton")
+
+
+def test_enum_custom_round_trip(con):
+    class Lepton:
+        # Implements PEP 435 in the minimal fashion needed
+        __members__ = OrderedDict()
+
+        def __init__(self, name, value, alias=None):
+            self.name = name
+            self.value = value
+            self.__members__[name] = self
+            setattr(self.__class__, name, self)
+            if alias:
+                self.__members__[alias] = self
+                setattr(self.__class__, alias, self)
+
+    def lepton_out(lepton):
+        return lepton.value
+
+    try:
+        con.run("create type lepton as enum ('1', '2', '3')")
+        con.register_out_adapter(Lepton, lepton_out)
+
+        v = Lepton("muon", "2")
+        retval = con.run("SELECT CAST(:v AS lepton)", v=v)
+        assert retval[0][0] == v.value
+    finally:
+        con.run("drop type lepton")
+
+
+def test_enum_py_round_trip(cursor):
+    class Lepton(Enum):
+        electron = "1"
+        muon = "2"
+        tau = "3"
+
+    try:
+        cursor.execute("create type lepton as enum ('1', '2', '3')")
+
+        v = Lepton.muon
+        retval = tuple(cursor.execute("SELECT cast(%s as lepton) as f1", (v,)))
+        assert retval[0][0] == v.value
+
+        cursor.execute("CREATE TEMPORARY TABLE testenum (f1 lepton)")
+        cursor.execute(
+            "INSERT INTO testenum VALUES (cast(%s as lepton))", (Lepton.electron,)
+        )
+    finally:
+        cursor.execute("drop table testenum")
+        cursor.execute("drop type lepton")
+
+
+def test_xml_roundtrip(cursor):
+    v = "<genome>gatccgagtac</genome>"
+    retval = tuple(cursor.execute("select xmlparse(content %s) as f1", (v,)))
+    assert retval[0][0] == v
+
+
+def test_uuid_roundtrip(con):
+    v = uuid.UUID("911460f2-1f43-fea2-3e2c-e01fd5b5069d")
+    retval = con.run("select cast(:v as uuid)", v=v)
+    assert retval[0][0] == v
+
+
+def test_inet_roundtrip_network(con):
+    v = ipaddress.ip_network("192.168.0.0/28")
+    retval = con.run("select cast(:v as inet)", v=v)
+    assert retval[0][0] == v
+
+
+def test_inet_roundtrip_address(con):
+    v = ipaddress.ip_address("192.168.0.1")
+    retval = con.run("select cast(:v as inet)", v=v)
+    assert retval[0][0] == v
+
+
+def test_xid_roundtrip(cursor):
+    v = 86722
+    cursor.execute("select cast(cast(%s as varchar) as xid) as f1", (v,))
+    retval = cursor.fetchall()
+    assert retval[0][0] == v
+
+    # Should complete without an exception
+    cursor.execute("select * from pg_locks where transactionid = %s", (97712,))
+    retval = cursor.fetchall()
+
+
+def test_int2vector_in(cursor):
+    retval = tuple(cursor.execute("select cast('1 2' as int2vector) as f1"))
+    assert retval[0][0] == [1, 2]
+
+    # Should complete without an exception
+    tuple(cursor.execute("select indkey from pg_index"))
+
+
+def test_timestamp_tz_out(cursor):
+    cursor.execute(
+        "SELECT '2001-02-03 04:05:06.17 America/Edmonton'" "::timestamp with time zone"
+    )
+    retval = cursor.fetchall()
+    dt = retval[0][0]
+    assert dt.tzinfo is not None, "no tzinfo returned"
+    assert dt.astimezone(Timezone.utc) == Datetime(
+        2001, 2, 3, 11, 5, 6, 170000, Timezone.utc
+    ), "retrieved value match failed"
+
+
+def test_timestamp_tz_roundtrip(is_java, con):
+    if not is_java:
+        mst = pytz.timezone("America/Edmonton")
+        v1 = mst.localize(Datetime(2001, 2, 3, 4, 5, 6, 170000))
+        retval = con.run("SELECT cast(:v as timestamptz)", v=v1)
+        v2 = retval[0][0]
+        assert v2.tzinfo is not None
+        assert v1 == v2
+
+
+def test_timestamp_mismatch(is_java, cursor):
+    if not is_java:
+        mst = pytz.timezone("America/Edmonton")
+        cursor.execute("SET SESSION TIME ZONE 'America/Edmonton'")
+        try:
+            cursor.execute(
+                "CREATE TEMPORARY TABLE TestTz "
+                "(f1 timestamp with time zone, "
+                "f2 timestamp without time zone)"
+            )
+            cursor.execute(
+                "INSERT INTO TestTz (f1, f2) VALUES (%s, %s)",
+                (
+                    # insert timestamp into timestamptz field (v1)
+                    Datetime(2001, 2, 3, 4, 5, 6, 170000),
+                    # insert timestamptz into timestamp field (v2)
+                    mst.localize(Datetime(2001, 2, 3, 4, 5, 6, 170000)),
+                ),
+            )
+            cursor.execute("SELECT f1, f2 FROM TestTz")
+            retval = cursor.fetchall()
+
+            # when inserting a timestamp into a timestamptz field,
+            # postgresql assumes that it is in local time. So the value
+            # that comes out will be the server's local time interpretation
+            # of v1. We've set the server's TZ to MST, the time should
+            # be...
+            f1 = retval[0][0]
+            assert f1 == Datetime(2001, 2, 3, 11, 5, 6, 170000, Timezone.utc)
+
+            # inserting the timestamptz into a timestamp field, pg8000
+            # converts the value into UTC, and then the PG server converts
+            # it into local time for insertion into the field. When we
+            # query for it, we get the same time back, like the tz was
+            # dropped.
+            f2 = retval[0][1]
+            assert f2 == Datetime(2001, 2, 3, 11, 5, 6, 170000)
+        finally:
+            cursor.execute("SET SESSION TIME ZONE DEFAULT")
+
+
+def test_name_out(cursor):
+    # select a field that is of "name" type:
+    tuple(cursor.execute("SELECT usename FROM pg_user"))
+    # It is sufficient that no errors were encountered.
+
+
+def test_oid_out(cursor):
+    tuple(cursor.execute("SELECT oid FROM pg_type"))
+    # It is sufficient that no errors were encountered.
+
+
+def test_boolean_in(cursor):
+    retval = tuple(cursor.execute("SELECT cast('t' as bool)"))
+    assert retval[0][0]
+
+
+def test_numeric_out(cursor):
+    for num in ("5000", "50.34"):
+        retval = tuple(cursor.execute("SELECT " + num + "::numeric"))
+        assert str(retval[0][0]) == num
+
+
+def test_int2_out(cursor):
+    retval = tuple(cursor.execute("SELECT 5000::smallint"))
+    assert retval[0][0] == 5000
+
+
+def test_int4_out(cursor):
+    retval = tuple(cursor.execute("SELECT 5000::integer"))
+    assert retval[0][0] == 5000
+
+
+def test_int8_out(cursor):
+    retval = tuple(cursor.execute("SELECT 50000000000000::bigint"))
+    assert retval[0][0] == 50000000000000
+
+
+def test_float4_out(cursor):
+    retval = tuple(cursor.execute("SELECT 1.1::real"))
+    assert retval[0][0] == 1.1
+
+
+def test_float8_out(cursor):
+    retval = tuple(cursor.execute("SELECT 1.1::double precision"))
+    assert retval[0][0] == 1.1000000000000001
+
+
+def test_varchar_out(cursor):
+    retval = tuple(cursor.execute("SELECT 'hello'::varchar(20)"))
+    assert retval[0][0] == "hello"
+
+
+def test_char_out(cursor):
+    retval = tuple(cursor.execute("SELECT 'hello'::char(20)"))
+    assert retval[0][0] == "hello               "
+
+
+def test_text_out(cursor):
+    retval = tuple(cursor.execute("SELECT 'hello'::text"))
+    assert retval[0][0] == "hello"
+
+
+def test_interval_in(con):
+    con.register_in_adapter(INTERVAL, pginterval_in)
+    retval = con.run(
+        "SELECT '1 month 16 days 12 hours 32 minutes 64 seconds'::interval"
+    )
+    expected_value = PGInterval(
+        microseconds=(12 * 60 * 60 * 1000 * 1000)
+        + (32 * 60 * 1000 * 1000)
+        + (64 * 1000 * 1000),
+        days=16,
+        months=1,
+    )
+    assert retval[0][0] == expected_value
+
+
+def test_interval_in_30_seconds(con):
+    retval = con.run("select interval '30 seconds'")
+    assert retval[0][0] == Timedelta(seconds=30)
+
+
+def test_interval_in_12_days_30_seconds(con):
+    retval = con.run("select interval '12 days 30 seconds'")
+    assert retval[0][0] == Timedelta(days=12, seconds=30)
+
+
+def test_timestamp_out(cursor):
+    cursor.execute("SELECT '2001-02-03 04:05:06.17'::timestamp")
+    retval = cursor.fetchall()
+    assert retval[0][0] == Datetime(2001, 2, 3, 4, 5, 6, 170000)
+
+
+def test_int4_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::INT[] AS f1, '{{1,2,3},{4,5,6}}'::INT[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int2_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::INT2[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::INT2[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT2[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int8_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::INT8[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::INT8[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT8[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_bool_array_out(cursor):
+    cursor.execute(
+        "SELECT '{TRUE,FALSE,FALSE,TRUE}'::BOOL[] AS f1, "
+        "'{{TRUE,FALSE,TRUE},{FALSE,TRUE,FALSE}}'::BOOL[][] AS f2, "
+        "'{{{TRUE,FALSE},{FALSE,TRUE}},{{NULL,TRUE},{FALSE,FALSE}}}'"
+        "::BOOL[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [True, False, False, True]
+    assert f2 == [[True, False, True], [False, True, False]]
+    assert f3 == [[[True, False], [False, True]], [[None, True], [False, False]]]
+
+
+def test_float4_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::FLOAT4[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::FLOAT4[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::FLOAT4[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_float8_array_out(cursor):
+    cursor.execute(
+        "SELECT '{1,2,3,4}'::FLOAT8[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::FLOAT8[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::FLOAT8[][][] AS f3"
+    )
+    f1, f2, f3 = cursor.fetchone()
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int_array_roundtrip_small(con):
+    """send small int array, should be sent as INT2[]"""
+    retval = con.run("SELECT cast(:v as int2[])", v=[1, 2, 3])
+    assert retval[0][0], [1, 2, 3]
+    column_name, column_typeoid = con.description[0][0:2]
+    assert column_typeoid == 1005, "type should be INT2[]"
+
+
+def test_int_array_roundtrip_multi(con):
+    """test multi-dimensional array, should be sent as INT2[]"""
+    retval = con.run("SELECT cast(:v as int2[])", v=[[1, 2], [3, 4]])
+    assert retval[0][0] == [[1, 2], [3, 4]]
+
+    column_name, column_typeoid = con.description[0][0:2]
+    assert column_typeoid == 1005, "type should be INT2[]"
+
+
+def test_int4_array_roundtrip(con):
+    """a larger value should kick it up to INT4[]..."""
+    retval = con.run("SELECT cast(:v as int4[])", v=[70000, 2, 3])
+    assert retval[0][0] == [70000, 2, 3]
+    column_name, column_typeoid = con.description[0][0:2]
+    assert column_typeoid == 1007, "type should be INT4[]"
+
+
+def test_int8_array_roundtrip(con):
+    """a much larger value should kick it up to INT8[]..."""
+    retval = con.run("SELECT cast(:v as int8[])", v=[7000000000, 2, 3])
+    assert retval[0][0] == [7000000000, 2, 3], "retrieved value match failed"
+    column_name, column_typeoid = con.description[0][0:2]
+    assert column_typeoid == 1016, "type should be INT8[]"
+
+
+def test_int_array_with_null_roundtrip(con):
+    retval = con.run("SELECT cast(:v as int[])", v=[1, None, 3])
+    assert retval[0][0] == [1, None, 3]
+
+
+def test_float_array_roundtrip(con):
+    retval = con.run("SELECT cast(:v as double precision[])", v=[1.1, 2.2, 3.3])
+    assert retval[0][0] == [1.1, 2.2, 3.3]
+
+
+def test_bool_array_roundtrip(con):
+    retval = con.run("SELECT cast(:v as bool[])", v=[True, False, None])
+    assert retval[0][0] == [True, False, None]
+
+
+@pytest.mark.parametrize(
+    "test_input,expected",
+    [
+        ("SELECT '{a,b,c}'::TEXT[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::CHAR[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::VARCHAR[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::CSTRING[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{a,b,c}'::NAME[] AS f1", ["a", "b", "c"]),
+        ("SELECT '{}'::text[];", []),
+        ('SELECT \'{NULL,"NULL",NULL,""}\'::text[];', [None, "NULL", None, ""]),
+    ],
+)
+def test_string_array_out(con, test_input, expected):
+    result = con.run(test_input)
+    assert result[0][0] == expected
+
+
+def test_numeric_array_out(cursor):
+    cursor.execute("SELECT '{1.1,2.2,3.3}'::numeric[] AS f1")
+    assert cursor.fetchone()[0] == [
+        decimal.Decimal("1.1"),
+        decimal.Decimal("2.2"),
+        decimal.Decimal("3.3"),
+    ]
+
+
+def test_numeric_array_roundtrip(con):
+    v = [decimal.Decimal("1.1"), None, decimal.Decimal("3.3")]
+    retval = con.run("SELECT cast(:v as numeric[])", v=v)
+    assert retval[0][0] == v
+
+
+def test_string_array_roundtrip(con):
+    v = [
+        "Hello!",
+        "World!",
+        "abcdefghijklmnopqrstuvwxyz",
+        "",
+        "A bunch of random characters:",
+        " ~!@#$%^&*()_+`1234567890-=[]\\{}|{;':\",./<>?\t",
+        None,
+    ]
+    retval = con.run("SELECT cast(:v as varchar[])", v=v)
+    assert retval[0][0] == v
+
+
+def test_array_string_escape():
+    v = '"'
+    res = pg8000.converters.array_string_escape(v)
+    assert res == '"\\""'
+
+
+def test_empty_array(con):
+    v = []
+    retval = con.run("SELECT cast(:v as varchar[])", v=v)
+    assert retval[0][0] == v
+
+
+def test_macaddr(cursor):
+    retval = tuple(cursor.execute("SELECT macaddr '08002b:010203'"))
+    assert retval[0][0] == "08:00:2b:01:02:03"
+
+
+def test_tsvector_roundtrip(cursor):
+    cursor.execute(
+        "SELECT cast(%s as tsvector)", ("a fat cat sat on a mat and ate a fat rat",)
+    )
+    retval = cursor.fetchall()
+    assert retval[0][0] == "'a' 'and' 'ate' 'cat' 'fat' 'mat' 'on' 'rat' 'sat'"
+
+
+def test_hstore_roundtrip(cursor):
+    val = '"a"=>"1"'
+    retval = tuple(cursor.execute("SELECT cast(%s as hstore)", (val,)))
+    assert retval[0][0] == val
+
+
+def test_json_roundtrip(con):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    retval = con.run("SELECT cast(:v as jsonb)", v=dumps(val))
+    assert retval[0][0] == val
+
+
+def test_jsonb_roundtrip(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as jsonb)", (dumps(val),))
+    retval = cursor.fetchall()
+    assert retval[0][0] == val
+
+
+def test_json_access_object(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as json) -> %s", (dumps(val), "name"))
+    retval = cursor.fetchall()
+    assert retval[0][0] == "Apollo 11 Cave"
+
+
+def test_jsonb_access_object(cursor):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    cursor.execute("SELECT cast(%s as jsonb) -> %s", (dumps(val), "name"))
+    retval = cursor.fetchall()
+    assert retval[0][0] == "Apollo 11 Cave"
+
+
+def test_json_access_array(con):
+    val = [-1, -2, -3, -4, -5]
+    retval = con.run(
+        "SELECT cast(:v1 as json) -> cast(:v2 as int)", v1=dumps(val), v2=2
+    )
+    assert retval[0][0] == -3
+
+
+def test_jsonb_access_array(con):
+    val = [-1, -2, -3, -4, -5]
+    retval = con.run(
+        "SELECT cast(:v1 as jsonb) -> cast(:v2 as int)", v1=dumps(val), v2=2
+    )
+    assert retval[0][0] == -3
+
+
+def test_jsonb_access_path(con):
+    j = {"a": [1, 2, 3], "b": [4, 5, 6]}
+
+    path = ["a", "2"]
+
+    retval = con.run("SELECT cast(:v1 as jsonb) #>> :v2", v1=dumps(j), v2=path)
+    assert retval[0][0] == str(j[path[0]][int(path[1])])
+
+
+def test_infinity_timestamp_roundtrip(cursor):
+    v = "infinity"
+    retval = tuple(cursor.execute("SELECT cast(%s as timestamp) as f1", (v,)))
+    assert retval[0][0] == v
+
+
+def test_point_roundtrip(cursor):
+    v = "(2.3,1)"
+    retval = tuple(cursor.execute("SELECT cast(%s as point) as f1", (v,)))
+    assert retval[0][0] == v
+
+
+def test_time_in():
+    actual = pg8000.converters.time_in("12:57:18.000396")
+    assert actual == Time(12, 57, 18, 396)
diff -pruN 1.10.6-3/test/legacy/test_typeobjects.py 1.23.0-0ubuntu1/test/legacy/test_typeobjects.py
--- 1.10.6-3/test/legacy/test_typeobjects.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/legacy/test_typeobjects.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,8 @@
+from pg8000 import PGInterval
+
+
+def test_pginterval_constructor_days():
+    i = PGInterval(days=1)
+    assert i.months is None
+    assert i.days == 1
+    assert i.microseconds is None
diff -pruN 1.10.6-3/test/native/conftest.py 1.23.0-0ubuntu1/test/native/conftest.py
--- 1.10.6-3/test/native/conftest.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/conftest.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,55 @@
+import sys
+from os import environ
+
+import pytest
+
+import pg8000.native
+
+
+@pytest.fixture(scope="class")
+def db_kwargs():
+    db_connect = {"user": "postgres", "password": "pw"}
+
+    for kw, var, f in [
+        ("host", "PGHOST", str),
+        ("password", "PGPASSWORD", str),
+        ("port", "PGPORT", int),
+    ]:
+        try:
+            db_connect[kw] = f(environ[var])
+        except KeyError:
+            pass
+
+    return db_connect
+
+
+@pytest.fixture
+def con(request, db_kwargs):
+    conn = pg8000.native.Connection(**db_kwargs)
+
+    def fin():
+        try:
+            conn.run("rollback")
+        except pg8000.native.InterfaceError:
+            pass
+
+        try:
+            conn.close()
+        except pg8000.native.InterfaceError:
+            pass
+
+    request.addfinalizer(fin)
+    return conn
+
+
+@pytest.fixture
+def pg_version(con):
+    retval = con.run("select current_setting('server_version')")
+    version = retval[0][0]
+    idx = version.index(".")
+    return int(version[:idx])
+
+
+@pytest.fixture(scope="module")
+def is_java():
+    return "java" in sys.platform.lower()
diff -pruN 1.10.6-3/test/native/github-actions/gss_native.py 1.23.0-0ubuntu1/test/native/github-actions/gss_native.py
--- 1.10.6-3/test/native/github-actions/gss_native.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/github-actions/gss_native.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,14 @@
+import pytest
+
+import pg8000.dbapi
+
+
+def test_gss(db_kwargs):
+    """Called by GitHub Actions with auth method gss."""
+
+    # Should raise an exception saying gss isn't supported
+    with pytest.raises(
+        pg8000.dbapi.InterfaceError,
+        match="Authentication method 7 not supported by pg8000.",
+    ):
+        pg8000.dbapi.connect(**db_kwargs)
diff -pruN 1.10.6-3/test/native/github-actions/md5_native.py 1.23.0-0ubuntu1/test/native/github-actions/md5_native.py
--- 1.10.6-3/test/native/github-actions/md5_native.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/github-actions/md5_native.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_md5(con):
+    """Called by GitHub Actions with auth method md5.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/native/github-actions/password_native.py 1.23.0-0ubuntu1/test/native/github-actions/password_native.py
--- 1.10.6-3/test/native/github-actions/password_native.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/github-actions/password_native.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_password(con):
+    """Called by GitHub Actions with auth method password.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/native/github-actions/scram-sha-256_native.py 1.23.0-0ubuntu1/test/native/github-actions/scram-sha-256_native.py
--- 1.10.6-3/test/native/github-actions/scram-sha-256_native.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/github-actions/scram-sha-256_native.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,5 @@
+def test_scram_sha_256(con):
+    """Called by GitHub Actions with auth method scram-sha-256.
+    We just need to check that we can get a connection.
+    """
+    pass
diff -pruN 1.10.6-3/test/native/github-actions/ssl_native.py 1.23.0-0ubuntu1/test/native/github-actions/ssl_native.py
--- 1.10.6-3/test/native/github-actions/ssl_native.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/github-actions/ssl_native.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,63 @@
+import ssl
+import sys
+
+import pytest
+
+import pg8000.dbapi
+
+
+# Check if running in Jython
+if "java" in sys.platform:
+    from javax.net.ssl import TrustManager, X509TrustManager
+    from jarray import array
+    from javax.net.ssl import SSLContext
+
+    class TrustAllX509TrustManager(X509TrustManager):
+        """Define a custom TrustManager which will blindly accept all
+        certificates"""
+
+        def checkClientTrusted(self, chain, auth):
+            pass
+
+        def checkServerTrusted(self, chain, auth):
+            pass
+
+        def getAcceptedIssuers(self):
+            return None
+
+    # Create a static reference to an SSLContext which will use
+    # our custom TrustManager
+    trust_managers = array([TrustAllX509TrustManager()], TrustManager)
+    TRUST_ALL_CONTEXT = SSLContext.getInstance("SSL")
+    TRUST_ALL_CONTEXT.init(None, trust_managers, None)
+    # Keep a static reference to the JVM's default SSLContext for restoring
+    # at a later time
+    DEFAULT_CONTEXT = SSLContext.getDefault()
+
+
+@pytest.fixture
+def trust_all_certificates(request):
+    """Decorator function that will make it so the context of the decorated
+    method will run with our TrustManager that accepts all certificates"""
+    # Only do this if running under Jython
+    is_java = "java" in sys.platform
+
+    if is_java:
+        from javax.net.ssl import SSLContext
+
+        SSLContext.setDefault(TRUST_ALL_CONTEXT)
+
+    def fin():
+        if is_java:
+            SSLContext.setDefault(DEFAULT_CONTEXT)
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.usefixtures("trust_all_certificates")
+def test_ssl(db_kwargs):
+    context = ssl.SSLContext()
+    context.check_hostname = False
+    db_kwargs["ssl_context"] = context
+    with pg8000.dbapi.connect(**db_kwargs):
+        pass
diff -pruN 1.10.6-3/test/native/test_auth.py 1.23.0-0ubuntu1/test/native/test_auth.py
--- 1.10.6-3/test/native/test_auth.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_auth.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,106 @@
+import ssl
+import sys
+
+import pytest
+
+import pg8000.native
+
+
+def test_gss(db_kwargs):
+    """This requires a line in pg_hba.conf that requires gss for the database
+    pg8000_gss
+    """
+
+    db_kwargs["database"] = "pg8000_gss"
+
+    # Should raise an exception saying gss isn't supported
+    with pytest.raises(
+        pg8000.native.InterfaceError,
+        match="Authentication method 7 not supported by pg8000.",
+    ):
+        pg8000.native.Connection(**db_kwargs)
+
+
+# Check if running in Jython
+if "java" in sys.platform:
+    from javax.net.ssl import TrustManager, X509TrustManager
+    from jarray import array
+    from javax.net.ssl import SSLContext
+
+    class TrustAllX509TrustManager(X509TrustManager):
+        """Define a custom TrustManager which will blindly accept all
+        certificates"""
+
+        def checkClientTrusted(self, chain, auth):
+            pass
+
+        def checkServerTrusted(self, chain, auth):
+            pass
+
+        def getAcceptedIssuers(self):
+            return None
+
+    # Create a static reference to an SSLContext which will use
+    # our custom TrustManager
+    trust_managers = array([TrustAllX509TrustManager()], TrustManager)
+    TRUST_ALL_CONTEXT = SSLContext.getInstance("SSL")
+    TRUST_ALL_CONTEXT.init(None, trust_managers, None)
+    # Keep a static reference to the JVM's default SSLContext for restoring
+    # at a later time
+    DEFAULT_CONTEXT = SSLContext.getDefault()
+
+
+@pytest.fixture
+def trust_all_certificates(request):
+    """Decorator function that will make it so the context of the decorated
+    method will run with our TrustManager that accepts all certificates"""
+    # Only do this if running under Jython
+    is_java = "java" in sys.platform
+
+    if is_java:
+        from javax.net.ssl import SSLContext
+
+        SSLContext.setDefault(TRUST_ALL_CONTEXT)
+
+    def fin():
+        if is_java:
+            SSLContext.setDefault(DEFAULT_CONTEXT)
+
+    request.addfinalizer(fin)
+
+
+@pytest.mark.usefixtures("trust_all_certificates")
+def test_ssl(db_kwargs):
+    context = ssl.SSLContext()
+    context.check_hostname = False
+    db_kwargs["ssl_context"] = context
+    with pg8000.native.Connection(**db_kwargs):
+        pass
+
+
+# This requires a line in pg_hba.conf that requires scram-sha-256 for the
+# database scram_sha_256
+
+
+def test_scram_sha_256(db_kwargs):
+    db_kwargs["database"] = "pg8000_scram_sha_256"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.native.DatabaseError, match="3D000"):
+        pg8000.native.Connection(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires scram-sha-256 for the
+# database scram_sha_256
+
+
+def test_scram_sha_256_plus(db_kwargs):
+    context = ssl.SSLContext()
+    context.check_hostname = False
+    db_kwargs["ssl_context"] = context
+    db_kwargs["database"] = "pg8000_scram_sha_256"
+
+    print(db_kwargs)
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(pg8000.native.DatabaseError, match="3D000"):
+        pg8000.native.Connection(**db_kwargs)
diff -pruN 1.10.6-3/test/native/test_benchmarks.py 1.23.0-0ubuntu1/test/native/test_benchmarks.py
--- 1.10.6-3/test/native/test_benchmarks.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_benchmarks.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,27 @@
+import pytest
+
+
+@pytest.mark.parametrize(
+    "txt",
+    (
+        ("int2", "cast(id / 100 as int2)"),
+        "cast(id as int4)",
+        "cast(id * 100 as int8)",
+        "(id % 2) = 0",
+        "N'Static text string'",
+        "cast(id / 100 as float4)",
+        "cast(id / 100 as float8)",
+        "cast(id / 100 as numeric)",
+        "timestamp '2001-09-28'",
+    ),
+)
+def test_round_trips(con, benchmark, txt):
+    def torun():
+        query = """SELECT {0} AS column1, {0} AS column2, {0} AS column3,
+            {0} AS column4, {0} AS column5, {0} AS column6, {0} AS column7
+            FROM (SELECT generate_series(1, 10000) AS id) AS tbl""".format(
+            txt
+        )
+        con.run(query)
+
+    benchmark(torun)
diff -pruN 1.10.6-3/test/native/test_connection.py 1.23.0-0ubuntu1/test/native/test_connection.py
--- 1.10.6-3/test/native/test_connection.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_connection.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,206 @@
+from datetime import time as Time
+
+import pytest
+
+from pg8000.native import Connection, DatabaseError, InterfaceError
+
+
+def test_unix_socket_missing():
+    conn_params = {"unix_sock": "/file-does-not-exist", "user": "doesn't-matter"}
+
+    with pytest.raises(InterfaceError):
+        Connection(**conn_params)
+
+
+def test_internet_socket_connection_refused():
+    conn_params = {"port": 0, "user": "doesn't-matter"}
+
+    with pytest.raises(
+        InterfaceError,
+        match="Can't create a connection to host localhost and port 0 "
+        "\\(timeout is None and source_address is None\\).",
+    ):
+        Connection(**conn_params)
+
+
+def test_database_missing(db_kwargs):
+    db_kwargs["database"] = "missing-db"
+    with pytest.raises(DatabaseError):
+        Connection(**db_kwargs)
+
+
+def test_notify(con):
+    backend_pid = con.run("select pg_backend_pid()")[0][0]
+    assert list(con.notifications) == []
+    con.run("LISTEN test")
+    con.run("NOTIFY test")
+
+    con.run("VALUES (1, 2), (3, 4), (5, 6)")
+    assert len(con.notifications) == 1
+    assert con.notifications[0] == (backend_pid, "test", "")
+
+
+def test_notify_with_payload(con):
+    backend_pid = con.run("select pg_backend_pid()")[0][0]
+    assert list(con.notifications) == []
+    con.run("LISTEN test")
+    con.run("NOTIFY test, 'Parnham'")
+
+    con.run("VALUES (1, 2), (3, 4), (5, 6)")
+    assert len(con.notifications) == 1
+    assert con.notifications[0] == (backend_pid, "test", "Parnham")
+
+
+# This requires a line in pg_hba.conf that requires md5 for the database
+# pg8000_md5
+
+
+def test_md5(db_kwargs):
+    db_kwargs["database"] = "pg8000_md5"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(DatabaseError, match="3D000"):
+        Connection(**db_kwargs)
+
+
+# This requires a line in pg_hba.conf that requires 'password' for the
+# database pg8000_password
+
+
+def test_password(db_kwargs):
+    db_kwargs["database"] = "pg8000_password"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(DatabaseError, match="3D000"):
+        Connection(**db_kwargs)
+
+
+def test_unicode_databaseName(db_kwargs):
+    db_kwargs["database"] = "pg8000_sn\uFF6Fw"
+
+    # Should only raise an exception saying db doesn't exist
+    with pytest.raises(DatabaseError, match="3D000"):
+        Connection(**db_kwargs)
+
+
+def test_bytes_databaseName(db_kwargs):
+    """Should only raise an exception saying db doesn't exist"""
+
+    db_kwargs["database"] = bytes("pg8000_sn\uFF6Fw", "utf8")
+    with pytest.raises(DatabaseError, match="3D000"):
+        Connection(**db_kwargs)
+
+
+def test_bytes_password(con, db_kwargs):
+    # Create user
+    username = "boltzmann"
+    password = "cha\uFF6Fs"
+    con.run("create user " + username + " with password '" + password + "';")
+
+    db_kwargs["user"] = username
+    db_kwargs["password"] = password.encode("utf8")
+    db_kwargs["database"] = "pg8000_md5"
+    with pytest.raises(DatabaseError, match="3D000"):
+        Connection(**db_kwargs)
+
+    con.run("drop role " + username)
+
+
+def test_broken_pipe_read(con, db_kwargs):
+    db1 = Connection(**db_kwargs)
+    res = db1.run("select pg_backend_pid()")
+    pid1 = res[0][0]
+
+    con.run("select pg_terminate_backend(:v)", v=pid1)
+    with pytest.raises(InterfaceError, match="network error on read"):
+        db1.run("select 1")
+
+
+def test_broken_pipe_unpack(con):
+    res = con.run("select pg_backend_pid()")
+    pid1 = res[0][0]
+
+    with pytest.raises(InterfaceError, match="network error"):
+        con.run("select pg_terminate_backend(:v)", v=pid1)
+
+
+def test_broken_pipe_flush(con, db_kwargs):
+    db1 = Connection(**db_kwargs)
+    res = db1.run("select pg_backend_pid()")
+    pid1 = res[0][0]
+
+    con.run("select pg_terminate_backend(:v)", v=pid1)
+    try:
+        db1.run("select 1")
+    except BaseException:
+        pass
+
+    # Sometimes raises and sometimes doesn't
+    try:
+        db1.close()
+    except InterfaceError as e:
+        assert str(e) == "network error on flush"
+
+
+def test_application_name(db_kwargs):
+    app_name = "my test application name"
+    db_kwargs["application_name"] = app_name
+    with Connection(**db_kwargs) as db:
+        res = db.run(
+            "select application_name from pg_stat_activity "
+            " where pid = pg_backend_pid()"
+        )
+
+        application_name = res[0][0]
+        assert application_name == app_name
+
+
+def test_application_name_integer(db_kwargs):
+    db_kwargs["application_name"] = 1
+    with pytest.raises(
+        InterfaceError,
+        match="The parameter application_name can't be of type <class 'int'>.",
+    ):
+        Connection(**db_kwargs)
+
+
+def test_application_name_bytearray(db_kwargs):
+    db_kwargs["application_name"] = bytearray(b"Philby")
+    Connection(**db_kwargs)
+
+
+class PG8000TestException(Exception):
+    pass
+
+
+def raise_exception(val):
+    raise PG8000TestException("oh noes!")
+
+
+def test_py_value_fail(con, mocker):
+    # Ensure that if types.py_value throws an exception, the original
+    # exception is raised (PG8000TestException), and the connection is
+    # still usable after the error.
+    mocker.patch.object(con, "py_types")
+    con.py_types = {Time: raise_exception}
+
+    with pytest.raises(PG8000TestException):
+        con.run("SELECT CAST(:v AS TIME)", v=Time(10, 30))
+
+        # ensure that the connection is still usable for a new query
+        res = con.run("VALUES ('hw3'::text)")
+        assert res[0][0] == "hw3"
+
+
+def test_no_data_error_recovery(con):
+    for i in range(1, 4):
+        with pytest.raises(DatabaseError) as e:
+            con.run("DROP TABLE t1")
+        assert e.value.args[0]["C"] == "42P01"
+        con.run("ROLLBACK")
+
+
+def test_closed_connection(con):
+    con.close()
+    with pytest.raises(InterfaceError, match="connection is closed"):
+        con.run("VALUES ('hw1'::text)")
diff -pruN 1.10.6-3/test/native/test_converters.py 1.23.0-0ubuntu1/test/native/test_converters.py
--- 1.10.6-3/test/native/test_converters.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_converters.py	2021-11-10 18:48:29.000000000 +0000
@@ -0,0 +1,137 @@
+from datetime import date as Date
+from decimal import Decimal
+from ipaddress import IPv4Address, IPv4Network
+
+import pytest
+
+from pg8000.converters import (
+    PGInterval,
+    PY_TYPES,
+    array_out,
+    array_string_escape,
+    interval_in,
+    make_param,
+    null_out,
+    numeric_in,
+    numeric_out,
+    pg_interval_in,
+    string_in,
+    string_out,
+)
+
+
+def test_null_out():
+    assert null_out(None) is None
+
+
+@pytest.mark.parametrize(
+    "array,out",
+    [
+        [[True, False, None], "{true,false,NULL}"],  # bool[]
+        [[IPv4Address("192.168.0.1")], "{192.168.0.1}"],  # inet[]
+        [[Date(2021, 3, 1)], "{2021-03-01}"],  # date[]
+        [[b"\x00\x01\x02\x03\x02\x01\x00"], '{"\\\\x00010203020100"}'],  # bytea[]
+        [[IPv4Network("192.168.0.0/28")], "{192.168.0.0/28}"],  # inet[]
+        [[1, 2, 3], "{1,2,3}"],  # int2[]
+        [[1, None, 3], "{1,NULL,3}"],  # int2[] with None
+        [[[1, 2], [3, 4]], "{{1,2},{3,4}}"],  # int2[] multidimensional
+        [[70000, 2, 3], "{70000,2,3}"],  # int4[]
+        [[7000000000, 2, 3], "{7000000000,2,3}"],  # int8[]
+        [[0, 7000000000, 2], "{0,7000000000,2}"],  # int8[]
+        [[1.1, 2.2, 3.3], "{1.1,2.2,3.3}"],  # float8[]
+        [["Veni", "vidi", "vici"], "{Veni,vidi,vici}"],  # varchar[]
+    ],
+)
+def test_array_out(con, array, out):
+    assert array_out(array) == out
+
+
+@pytest.mark.parametrize(
+    "value",
+    [
+        "1.1",
+        "-1.1",
+        "10000",
+        "20000",
+        "-1000000000.123456789",
+        "1.0",
+        "12.44",
+    ],
+)
+def test_numeric_out(value):
+    assert numeric_out(value) == str(value)
+
+
+@pytest.mark.parametrize(
+    "value",
+    [
+        "1.1",
+        "-1.1",
+        "10000",
+        "20000",
+        "-1000000000.123456789",
+        "1.0",
+        "12.44",
+    ],
+)
+def test_numeric_in(value):
+    assert numeric_in(value) == Decimal(value)
+
+
+@pytest.mark.parametrize(
+    "value",
+    [
+        "hello \u0173 world",
+    ],
+)
+def test_string_out(value):
+    assert string_out(value) == value
+
+
+@pytest.mark.parametrize(
+    "value",
+    [
+        "hello \u0173 world",
+    ],
+)
+def test_string_in(value):
+    assert string_in(value) == value
+
+
+def test_PGInterval_init():
+    i = PGInterval(days=1)
+    assert i.months is None
+    assert i.days == 1
+    assert i.microseconds is None
+
+
+def test_PGInterval_repr():
+    v = PGInterval(microseconds=123456789, days=2, months=24)
+    assert repr(v) == "<PGInterval 24 months 2 days 123456789 microseconds>"
+
+
+def test_PGInterval_str():
+    v = PGInterval(microseconds=123456789, days=2, months=24, millennia=2)
+    assert str(v) == "2 millennia 24 months 2 days 123456789 microseconds"
+
+
+def test_pg_interval_in_1_year():
+    assert pg_interval_in("1 year") == PGInterval(years=1)
+
+
+def test_interval_in_2_hours():
+    assert interval_in("2 hours")
+
+
+def test_array_string_escape():
+    v = '"'
+    res = array_string_escape(v)
+    assert res == '"\\""'
+
+
+def test_make_param():
+    class BClass(bytearray):
+        pass
+
+    val = BClass(b"\x00\x01\x02\x03\x02\x01\x00")
+    assert make_param(PY_TYPES, val) == "\\x00010203020100"
diff -pruN 1.10.6-3/test/native/test_copy.py 1.23.0-0ubuntu1/test/native/test_copy.py
--- 1.10.6-3/test/native/test_copy.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_copy.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,102 @@
+from io import BytesIO, StringIO
+
+import pytest
+
+
+@pytest.fixture
+def db_table(request, con):
+    con.run("START TRANSACTION")
+    con.run(
+        "CREATE TEMPORARY TABLE t1 "
+        "(f1 int primary key, f2 int not null, f3 varchar(50) null) "
+        "on commit drop"
+    )
+    return con
+
+
+def test_copy_to_with_table(db_table):
+    db_table.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v1, :v2)", v1=1, v2="1")
+    db_table.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v1, :v2)", v1=2, v2="2")
+    db_table.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v1, :v2)", v1=3, v2="3")
+
+    stream = BytesIO()
+    db_table.run("copy t1 to stdout", stream=stream)
+    assert stream.getvalue() == b"1\t1\t1\n2\t2\t2\n3\t3\t3\n"
+    assert db_table.row_count == 3
+
+
+def test_copy_to_with_query(con):
+    stream = BytesIO()
+    con.run(
+        "COPY (SELECT 1 as One, 2 as Two) TO STDOUT WITH DELIMITER "
+        "'X' CSV HEADER QUOTE AS 'Y' FORCE QUOTE Two",
+        stream=stream,
+    )
+    assert stream.getvalue() == b"oneXtwo\n1XY2Y\n"
+    assert con.row_count == 1
+
+
+def test_copy_to_with_text_stream(con):
+    stream = StringIO()
+    con.run(
+        "COPY (SELECT 1 as One, 2 as Two) TO STDOUT WITH DELIMITER "
+        "'X' CSV HEADER QUOTE AS 'Y' FORCE QUOTE Two",
+        stream=stream,
+    )
+    assert stream.getvalue() == "oneXtwo\n1XY2Y\n"
+    assert con.row_count == 1
+
+
+def test_copy_from_with_table(db_table):
+    stream = BytesIO(b"1\t1\t1\n2\t2\t2\n3\t3\t3\n")
+    db_table.run("copy t1 from STDIN", stream=stream)
+    assert db_table.row_count == 3
+
+    retval = db_table.run("SELECT * FROM t1 ORDER BY f1")
+    assert retval == [[1, 1, "1"], [2, 2, "2"], [3, 3, "3"]]
+
+
+def test_copy_from_with_text_stream(db_table):
+    stream = StringIO("1\t1\t1\n2\t2\t2\n3\t3\t3\n")
+    db_table.run("copy t1 from STDIN", stream=stream)
+
+    retval = db_table.run("SELECT * FROM t1 ORDER BY f1")
+    assert retval == [[1, 1, "1"], [2, 2, "2"], [3, 3, "3"]]
+
+
+def test_copy_from_with_query(db_table):
+    stream = BytesIO(b"f1Xf2\n1XY1Y\n")
+    db_table.run(
+        "COPY t1 (f1, f2) FROM STDIN WITH DELIMITER 'X' CSV HEADER "
+        "QUOTE AS 'Y' FORCE NOT NULL f1",
+        stream=stream,
+    )
+    assert db_table.row_count == 1
+
+    retval = db_table.run("SELECT * FROM t1 ORDER BY f1")
+    assert retval == [[1, 1, None]]
+
+
+def test_copy_from_with_error(db_table):
+    stream = BytesIO(b"f1Xf2\n\n1XY1Y\n")
+    with pytest.raises(BaseException) as e:
+        db_table.run(
+            "COPY t1 (f1, f2) FROM STDIN WITH DELIMITER 'X' CSV HEADER "
+            "QUOTE AS 'Y' FORCE NOT NULL f1",
+            stream=stream,
+        )
+
+    arg = {
+        "S": ("ERROR",),
+        "C": ("22P02",),
+        "M": (
+            'invalid input syntax for type integer: ""',
+            'invalid input syntax for integer: ""',
+        ),
+        "W": ('COPY t1, line 2, column f1: ""',),
+        "F": ("numutils.c",),
+        "R": ("pg_atoi", "pg_strtoint32"),
+    }
+    earg = e.value.args[0]
+    for k, v in arg.items():
+        assert earg[k] in v
diff -pruN 1.10.6-3/test/native/test_core.py 1.23.0-0ubuntu1/test/native/test_core.py
--- 1.10.6-3/test/native/test_core.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_core.py	2021-07-30 12:10:36.000000000 +0000
@@ -0,0 +1,38 @@
+from io import BytesIO
+
+from pg8000.core import (
+    CoreConnection,
+    NULL_BYTE,
+    PASSWORD,
+    _create_message,
+)
+
+
+def test_handle_AUTHENTICATION_3(mocker):
+    """Shouldn't send a FLUSH message, as FLUSH only used in extended-query"""
+
+    mocker.patch.object(CoreConnection, "__init__", lambda x: None)
+    con = CoreConnection()
+    password = "barbour".encode("utf8")
+    con.password = password
+    con._flush = mocker.Mock()
+    buf = BytesIO()
+    con._write = buf.write
+    CoreConnection.handle_AUTHENTICATION_REQUEST(con, b"\x00\x00\x00\x03", None)
+    assert buf.getvalue() == _create_message(PASSWORD, password + NULL_BYTE)
+    # assert buf.getvalue() == b"p\x00\x00\x00\x0cbarbour\x00"
+
+
+def test_create_message():
+    msg = _create_message(PASSWORD, "barbour".encode("utf8") + NULL_BYTE)
+    assert msg == b"p\x00\x00\x00\x0cbarbour\x00"
+
+
+def test_handle_ERROR_RESPONSE(mocker):
+    """Check it handles invalid encodings in the error messages"""
+
+    mocker.patch.object(CoreConnection, "__init__", lambda x: None)
+    con = CoreConnection()
+    con._client_encoding = "utf8"
+    CoreConnection.handle_ERROR_RESPONSE(con, b"S\xc2err" + NULL_BYTE + NULL_BYTE, None)
+    assert str(con.error) == "{'S': '�err'}"
diff -pruN 1.10.6-3/test/native/test_prepared_statement.py 1.23.0-0ubuntu1/test/native/test_prepared_statement.py
--- 1.10.6-3/test/native/test_prepared_statement.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_prepared_statement.py	2021-10-30 13:54:27.000000000 +0000
@@ -0,0 +1,7 @@
+def test_prepare(con):
+    con.prepare("SELECT CAST(:v AS INTEGER)")
+
+
+def test_run(con):
+    ps = con.prepare("SELECT cast(:v as varchar)")
+    ps.run(v="speedy")
diff -pruN 1.10.6-3/test/native/test_query.py 1.23.0-0ubuntu1/test/native/test_query.py
--- 1.10.6-3/test/native/test_query.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_query.py	2021-11-12 20:18:16.000000000 +0000
@@ -0,0 +1,222 @@
+import pytest
+
+from pg8000.native import DatabaseError, to_statement
+
+
+# Tests relating to the basic operation of the database driver, driven by the
+# pg8000 custom interface.
+
+
+@pytest.fixture
+def db_table(request, con):
+    con.run(
+        "CREATE TEMPORARY TABLE t1 (f1 int primary key, "
+        "f2 bigint not null, f3 varchar(50) null) "
+    )
+
+    def fin():
+        try:
+            con.run("drop table t1")
+        except DatabaseError:
+            pass
+
+    request.addfinalizer(fin)
+    return con
+
+
+def test_database_error(con):
+    with pytest.raises(DatabaseError):
+        con.run("INSERT INTO t99 VALUES (1, 2, 3)")
+
+
+# Run a query on a table, alter the structure of the table, then run the
+# original query again.
+
+
+def test_alter(db_table):
+    db_table.run("select * from t1")
+    db_table.run("alter table t1 drop column f3")
+    db_table.run("select * from t1")
+
+
+# Run a query on a table, drop then re-create the table, then run the
+# original query again.
+
+
+def test_create(db_table):
+    db_table.run("select * from t1")
+    db_table.run("drop table t1")
+    db_table.run("create temporary table t1 (f1 int primary key)")
+    db_table.run("select * from t1")
+
+
+def test_parametrized(db_table):
+    res = db_table.run("SELECT f1, f2, f3 FROM t1 WHERE f1 > :f1", f1=3)
+    for row in res:
+        f1, f2, f3 = row
+
+
+def test_insert_returning(db_table):
+    db_table.run("CREATE TEMPORARY TABLE t2 (id serial, data text)")
+
+    # Test INSERT ... RETURNING with one row...
+    res = db_table.run("INSERT INTO t2 (data) VALUES (:v) RETURNING id", v="test1")
+    row_id = res[0][0]
+    res = db_table.run("SELECT data FROM t2 WHERE id = :v", v=row_id)
+    assert "test1" == res[0][0]
+
+    assert db_table.row_count == 1
+
+    # Test with multiple rows...
+    res = db_table.run(
+        "INSERT INTO t2 (data) VALUES (:v1), (:v2), (:v3) " "RETURNING id",
+        v1="test2",
+        v2="test3",
+        v3="test4",
+    )
+    assert db_table.row_count == 3
+    ids = [x[0] for x in res]
+    assert len(ids) == 3
+
+
+def test_row_count_select(db_table):
+    expected_count = 57
+    for i in range(expected_count):
+        db_table.run(
+            "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=i, v2=i, v3=None
+        )
+
+    db_table.run("SELECT * FROM t1")
+
+    # Check row_count
+    assert expected_count == db_table.row_count
+
+    # Should be -1 for a command with no results
+    db_table.run("DROP TABLE t1")
+    assert -1 == db_table.row_count
+
+
+def test_row_count_delete(db_table):
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3=None
+    )
+    db_table.run("DELETE FROM t1")
+    assert db_table.row_count == 1
+
+
+def test_row_count_update(db_table):
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3=None
+    )
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=2, v2=10, v3=None
+    )
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=3, v2=100, v3=None
+    )
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=4, v2=1000, v3=None
+    )
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=5, v2=10000, v3=None
+    )
+    db_table.run("UPDATE t1 SET f3 = :v1 WHERE f2 > 101", v1="Hello!")
+    assert db_table.row_count == 2
+
+
+def test_int_oid(con):
+    # https://bugs.launchpad.net/pg8000/+bug/230796
+    con.run("SELECT typname FROM pg_type WHERE oid = :v", v=100)
+
+
+def test_unicode_query(con):
+    con.run(
+        "CREATE TEMPORARY TABLE \u043c\u0435\u0441\u0442\u043e "
+        "(\u0438\u043c\u044f VARCHAR(50), "
+        "\u0430\u0434\u0440\u0435\u0441 VARCHAR(250))"
+    )
+
+
+def test_transactions(db_table):
+    db_table.run("start transaction")
+    db_table.run(
+        "INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3="Zombie"
+    )
+    db_table.run("rollback")
+    db_table.run("select * from t1")
+
+    assert db_table.row_count == 0
+
+
+def test_in(con):
+    ret = con.run("SELECT typname FROM pg_type WHERE oid = any(:v)", v=[16, 23])
+    assert ret[0][0] == "bool"
+
+
+# An empty query should raise a ProgrammingError
+def test_empty_query(con):
+    with pytest.raises(DatabaseError):
+        con.run("")
+
+
+def test_rollback_no_transaction(con):
+    # Remove any existing notices
+    con.notices.clear()
+
+    # First, verify that a raw rollback does produce a notice
+    con.run("rollback")
+
+    assert 1 == len(con.notices)
+
+    # 25P01 is the code for no_active_sql_transaction. It has
+    # a message and severity name, but those might be
+    # localized/depend on the server version.
+    assert con.notices.pop().get(b"C") == b"25P01"
+
+
+def test_close_prepared_statement(con):
+    ps = con.prepare("select 1")
+    ps.run()
+    res = con.run("select count(*) from pg_prepared_statements")
+    assert res[0][0] == 1  # Should have one prepared statement
+
+    ps.close()
+
+    res = con.run("select count(*) from pg_prepared_statements")
+    assert res[0][0] == 0  # Should have no prepared statements
+
+
+def test_no_data(con):
+    assert con.run("START TRANSACTION") is None
+
+
+def test_multiple_statements(con):
+    statements = "SELECT 5; SELECT 'Erich Fromm';"
+    assert con.run(statements) == [[5], ["Erich Fromm"]]
+
+
+def test_unexecuted_connection_row_count(con):
+    assert con.row_count is None
+
+
+def test_unexecuted_connection_columns(con):
+    assert con.columns is None
+
+
+def test_sql_prepared_statement(con):
+    con.run("PREPARE gen_series AS SELECT generate_series(1, 10);")
+    con.run("EXECUTE gen_series")
+
+
+def test_to_statement():
+    new_query, _ = to_statement(
+        "SELECT sum(x)::decimal(5, 2) :f_2, :f1 FROM t WHERE a=:f_2"
+    )
+    expected = "SELECT sum(x)::decimal(5, 2) $1, $2 FROM t WHERE a=$1"
+    assert new_query == expected
+
+
+def test_not_parsed_if_no_params(mocker, con):
+    mock_to_statement = mocker.patch("pg8000.native.to_statement")
+    con.run("ROLLBACK")
+    mock_to_statement.assert_not_called()
diff -pruN 1.10.6-3/test/native/test_typeconversion.py 1.23.0-0ubuntu1/test/native/test_typeconversion.py
--- 1.10.6-3/test/native/test_typeconversion.py	1970-01-01 00:00:00.000000000 +0000
+++ 1.23.0-0ubuntu1/test/native/test_typeconversion.py	2021-10-13 18:32:47.000000000 +0000
@@ -0,0 +1,647 @@
+import os
+import time
+from collections import OrderedDict
+from datetime import (
+    date as Date,
+    datetime as Datetime,
+    time as Time,
+    timedelta as Timedelta,
+    timezone as Timezone,
+)
+from decimal import Decimal
+from enum import Enum
+from ipaddress import IPv4Address, IPv4Network
+from json import dumps
+from uuid import UUID
+
+import pytest
+
+import pytz
+
+from pg8000.converters import (
+    BIGINT,
+    BIGINT_ARRAY,
+    BOOLEAN,
+    CIDR_ARRAY,
+    DATE,
+    FLOAT_ARRAY,
+    INET,
+    INTEGER_ARRAY,
+    INTERVAL,
+    JSON,
+    JSONB,
+    JSONB_ARRAY,
+    JSON_ARRAY,
+    MONEY,
+    MONEY_ARRAY,
+    NUMERIC,
+    NUMERIC_ARRAY,
+    PGInterval,
+    POINT,
+    SMALLINT_ARRAY,
+    TIME,
+    TIMESTAMP,
+    TIMESTAMPTZ,
+    TIMESTAMPTZ_ARRAY,
+    TIMESTAMP_ARRAY,
+    UUID_ARRAY,
+    UUID_TYPE,
+    XID,
+    pg_interval_in,
+    pg_interval_out,
+    time_in,
+)
+
+
+def test_str_then_int(con):
+    v1 = "hello world"
+    retval = con.run("SELECT cast(:v1 as varchar) as f1", v1=v1)
+    assert retval[0][0] == v1
+
+    v2 = 1
+    retval = con.run("SELECT cast(:v2 as varchar) as f1", v2=v2)
+    assert retval[0][0] == str(v2)
+
+
+def test_insert_null(con):
+    v = None
+    con.run("CREATE TEMPORARY TABLE test_int (f INTEGER)")
+    con.run("INSERT INTO test_int VALUES (:v)", v=v)
+    retval = con.run("SELECT * FROM test_int")
+    assert retval[0][0] == v
+
+
+def test_int_roundtrip(con):
+    int2 = 21
+    int4 = 23
+    int8 = 20
+
+    MAP = {
+        int2: "int2",
+        int4: "int4",
+        int8: "int8",
+    }
+
+    test_values = [
+        (0, int2),
+        (-32767, int2),
+        (-32768, int4),
+        (+32767, int2),
+        (+32768, int4),
+        (-2147483647, int4),
+        (-2147483648, int8),
+        (+2147483647, int4),
+        (+2147483648, int8),
+        (-9223372036854775807, int8),
+        (+9223372036854775807, int8),
+    ]
+
+    for value, typoid in test_values:
+        retval = con.run("SELECT cast(:v as " + MAP[typoid] + ")", v=value)
+        assert retval[0][0] == value
+        column_type_oid = con.columns[0]["type_oid"]
+        assert column_type_oid == typoid
+
+
+def test_bytearray_subclass_round_trip(con):
+    class BClass(bytearray):
+        pass
+
+    binary = b"\x00\x01\x02\x03\x02\x01\x00"
+    retval = con.run("SELECT cast(:v as bytea)", v=BClass(binary))
+    assert retval[0][0] == binary
+
+
+def test_timestamp_roundtrip(is_java, con):
+    v = Datetime(2001, 2, 3, 4, 5, 6, 170000)
+    retval = con.run("SELECT cast(:v as timestamp)", v=v)
+    assert retval[0][0] == v
+
+    # Test that time zone doesn't affect it
+    # Jython 2.5.3 doesn't have a time.tzset() so skip
+    if not is_java:
+        orig_tz = os.environ.get("TZ")
+        os.environ["TZ"] = "America/Edmonton"
+        time.tzset()
+
+        retval = con.run("SELECT cast(:v as timestamp)", v=v)
+        assert retval[0][0] == v
+
+        if orig_tz is None:
+            del os.environ["TZ"]
+        else:
+            os.environ["TZ"] = orig_tz
+        time.tzset()
+
+
+def test_interval_roundtrip(con):
+    con.register_in_adapter(INTERVAL, pg_interval_in)
+    con.register_out_adapter(PGInterval, pg_interval_out)
+    v = PGInterval(microseconds=123456789, days=2, months=24)
+    retval = con.run("SELECT cast(:v as interval)", v=v)
+    assert retval[0][0] == v
+
+
+def test_enum_str_round_trip(con):
+    try:
+        con.run("create type lepton as enum ('electron', 'muon', 'tau')")
+
+        v = "muon"
+        retval = con.run("SELECT cast(:v as lepton) as f1", v=v)
+        assert retval[0][0] == v
+        con.run("CREATE TEMPORARY TABLE testenum (f1 lepton)")
+        con.run("INSERT INTO testenum VALUES (cast(:v as lepton))", v="electron")
+    finally:
+        con.run("drop table testenum")
+        con.run("drop type lepton")
+
+
+def test_enum_custom_round_trip(con):
+    class Lepton:
+        # Implements PEP 435 in the minimal fashion needed
+        __members__ = OrderedDict()
+
+        def __init__(self, name, value, alias=None):
+            self.name = name
+            self.value = value
+            self.__members__[name] = self
+            setattr(self.__class__, name, self)
+            if alias:
+                self.__members__[alias] = self
+                setattr(self.__class__, alias, self)
+
+    def lepton_out(lepton):
+        return lepton.value
+
+    try:
+        con.run("create type lepton as enum ('1', '2', '3')")
+        con.register_out_adapter(Lepton, lepton_out)
+
+        v = Lepton("muon", "2")
+        retval = con.run("SELECT CAST(:v AS lepton)", v=v)
+        assert retval[0][0] == v.value
+    finally:
+        con.run("drop type lepton")
+
+
+def test_enum_py_round_trip(con):
+    class Lepton(Enum):
+        electron = "1"
+        muon = "2"
+        tau = "3"
+
+    try:
+        con.run("create type lepton as enum ('1', '2', '3')")
+
+        v = Lepton.muon
+        retval = con.run("SELECT cast(:v as lepton) as f1", v=v)
+        assert retval[0][0] == v.value
+
+        con.run("CREATE TEMPORARY TABLE testenum (f1 lepton)")
+        con.run("INSERT INTO testenum VALUES (cast(:v as lepton))", v=Lepton.electron)
+    finally:
+        con.run("drop table testenum")
+        con.run("drop type lepton")
+
+
+def test_xml_roundtrip(con):
+    v = "<genome>gatccgagtac</genome>"
+    retval = con.run("select xmlparse(content :v) as f1", v=v)
+    assert retval[0][0] == v
+
+
+def test_int2vector_in(con):
+    retval = con.run("select cast('1 2' as int2vector) as f1")
+    assert retval[0][0] == [1, 2]
+
+    # Should complete without an exception
+    con.run("select indkey from pg_index")
+
+
+def test_timestamp_tz_out(con):
+    retval = con.run(
+        "SELECT '2001-02-03 04:05:06.17 America/Edmonton'" "::timestamp with time zone"
+    )
+    dt = retval[0][0]
+    assert dt.tzinfo is not None, "no tzinfo returned"
+    assert dt.astimezone(Timezone.utc) == Datetime(
+        2001, 2, 3, 11, 5, 6, 170000, Timezone.utc
+    ), "retrieved value match failed"
+
+
+def test_timestamp_tz_roundtrip(is_java, con):
+    if not is_java:
+        mst = pytz.timezone("America/Edmonton")
+        v1 = mst.localize(Datetime(2001, 2, 3, 4, 5, 6, 170000))
+        retval = con.run("SELECT cast(:v as timestamptz)", v=v1)
+        v2 = retval[0][0]
+        assert v2.tzinfo is not None
+        assert v1 == v2
+
+
+def test_timestamp_mismatch(is_java, con):
+    if not is_java:
+        mst = pytz.timezone("America/Edmonton")
+        con.run("SET SESSION TIME ZONE 'America/Edmonton'")
+        try:
+            con.run(
+                "CREATE TEMPORARY TABLE TestTz (f1 timestamp with time zone, "
+                "f2 timestamp without time zone)"
+            )
+            con.run(
+                "INSERT INTO TestTz (f1, f2) VALUES (:v1, :v2)",
+                # insert timestamp into timestamptz field (v1)
+                v1=Datetime(2001, 2, 3, 4, 5, 6, 170000),
+                # insert timestamptz into timestamp field (v2)
+                v2=mst.localize(Datetime(2001, 2, 3, 4, 5, 6, 170000)),
+            )
+            retval = con.run("SELECT f1, f2 FROM TestTz")
+
+            # when inserting a timestamp into a timestamptz field,
+            # postgresql assumes that it is in local time. So the value
+            # that comes out will be the server's local time interpretation
+            # of v1. We've set the server's TZ to MST, the time should
+            # be...
+            f1 = retval[0][0]
+            assert f1 == Datetime(2001, 2, 3, 11, 5, 6, 170000, Timezone.utc)
+
+            # inserting the timestamptz into a timestamp field, pg8000 converts the
+            # value into UTC, and then the PG server sends that time back
+            f2 = retval[0][1]
+            assert f2 == Datetime(2001, 2, 3, 11, 5, 6, 170000)
+        finally:
+            con.run("SET SESSION TIME ZONE DEFAULT")
+
+
+def test_name_out(con):
+    # select a field that is of "name" type:
+    con.run("SELECT usename FROM pg_user")
+    # It is sufficient that no errors were encountered.
+
+
+def test_oid_out(con):
+    con.run("SELECT oid FROM pg_type")
+    # It is sufficient that no errors were encountered.
+
+
+def test_boolean_in(con):
+    retval = con.run("SELECT cast('t' as bool)")
+    assert retval[0][0]
+
+
+def test_numeric_out(con):
+    for num in ("5000", "50.34"):
+        retval = con.run("SELECT " + num + "::numeric")
+        assert str(retval[0][0]) == num
+
+
+def test_int2_out(con):
+    retval = con.run("SELECT 5000::smallint")
+    assert retval[0][0] == 5000
+
+
+def test_int4_out(con):
+    retval = con.run("SELECT 5000::integer")
+    assert retval[0][0] == 5000
+
+
+def test_int8_out(con):
+    retval = con.run("SELECT 50000000000000::bigint")
+    assert retval[0][0] == 50000000000000
+
+
+def test_float4_out(con):
+    retval = con.run("SELECT 1.1::real")
+    assert retval[0][0] == 1.1
+
+
+def test_float8_out(con):
+    retval = con.run("SELECT 1.1::double precision")
+    assert retval[0][0] == 1.1000000000000001
+
+
+def test_varchar_out(con):
+    retval = con.run("SELECT 'hello'::varchar(20)")
+    assert retval[0][0] == "hello"
+
+
+def test_char_out(con):
+    retval = con.run("SELECT 'hello'::char(20)")
+    assert retval[0][0] == "hello               "
+
+
+def test_text_out(con):
+    retval = con.run("SELECT 'hello'::text")
+    assert retval[0][0] == "hello"
+
+
+def test_pg_interval_in(con):
+    con.register_in_adapter(1186, pg_interval_in)
+    retval = con.run(
+        "SELECT CAST('1 month 16 days 12 hours 32 minutes 64 seconds' as INTERVAL)"
+    )
+    expected_value = PGInterval(
+        microseconds=(12 * 60 * 60 * 1000 * 1000)
+        + (32 * 60 * 1000 * 1000)
+        + (64 * 1000 * 1000),
+        days=16,
+        months=1,
+    )
+    assert retval[0][0] == expected_value
+
+
+def test_interval_in_30_seconds(con):
+    retval = con.run("select interval '30 seconds'")
+    assert retval[0][0] == Timedelta(seconds=30)
+
+
+def test_interval_in_12_days_30_seconds(con):
+    retval = con.run("select interval '12 days 30 seconds'")
+    assert retval[0][0] == Timedelta(days=12, seconds=30)
+
+
+def test_timestamp_out(con):
+    retval = con.run("SELECT '2001-02-03 04:05:06.17'::timestamp")
+    assert retval[0][0] == Datetime(2001, 2, 3, 4, 5, 6, 170000)
+
+
+def test_int4_array_out(con):
+    retval = con.run(
+        "SELECT '{1,2,3,4}'::INT[] AS f1, '{{1,2,3},{4,5,6}}'::INT[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT[][][] AS f3"
+    )
+    f1, f2, f3 = retval[0]
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int2_array_out(con):
+    res = con.run(
+        "SELECT '{1,2,3,4}'::INT2[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::INT2[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT2[][][] AS f3"
+    )
+    f1, f2, f3 = res[0]
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_int8_array_out(con):
+    res = con.run(
+        "SELECT '{1,2,3,4}'::INT8[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::INT8[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::INT8[][][] AS f3"
+    )
+    f1, f2, f3 = res[0]
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_bool_array_out(con):
+    res = con.run(
+        "SELECT '{TRUE,FALSE,FALSE,TRUE}'::BOOL[] AS f1, "
+        "'{{TRUE,FALSE,TRUE},{FALSE,TRUE,FALSE}}'::BOOL[][] AS f2, "
+        "'{{{TRUE,FALSE},{FALSE,TRUE}},{{NULL,TRUE},{FALSE,FALSE}}}'"
+        "::BOOL[][][] AS f3"
+    )
+    f1, f2, f3 = res[0]
+    assert f1 == [True, False, False, True]
+    assert f2 == [[True, False, True], [False, True, False]]
+    assert f3 == [[[True, False], [False, True]], [[None, True], [False, False]]]
+
+
+def test_float4_array_out(con):
+    res = con.run(
+        "SELECT '{1,2,3,4}'::FLOAT4[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::FLOAT4[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::FLOAT4[][][] AS f3"
+    )
+    f1, f2, f3 = res[0]
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+def test_float8_array_out(con):
+    res = con.run(
+        "SELECT '{1,2,3,4}'::FLOAT8[] AS f1, "
+        "'{{1,2,3},{4,5,6}}'::FLOAT8[][] AS f2, "
+        "'{{{1,2},{3,4}},{{NULL,6},{7,8}}}'::FLOAT8[][][] AS f3"
+    )
+    f1, f2, f3 = res[0]
+    assert f1 == [1, 2, 3, 4]
+    assert f2 == [[1, 2, 3], [4, 5, 6]]
+    assert f3 == [[[1, 2], [3, 4]], [[None, 6], [7, 8]]]
+
+
+CURRENCIES = {
+    "en_GB.UTF-8": "£",
+    "C.UTF-8": "$",
+    "C.UTF8": "$",
+}
+LANG = os.environ["LANG"]
+CURRENCY = CURRENCIES[LANG]
+
+
+@pytest.mark.parametrize(
+    "test_input,oid",
+    [
+        [[Datetime(2001, 2, 3, 4, 5, 6)], TIMESTAMP_ARRAY],  # timestamp[]
+        [  # timestamptz[]
+            [Datetime(2001, 2, 3, 4, 5, 6, 0, Timezone.utc)],
+            TIMESTAMPTZ_ARRAY,
+        ],
+        [
+            {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003},
+            # json
+            JSON,
+        ],
+        [{"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}, JSONB],  # jsonb
+        [[IPv4Network("192.168.0.0/28")], CIDR_ARRAY],  # cidr[]
+        [[1, 2, 3], SMALLINT_ARRAY],  # int2[]
+        [[[1, 2], [3, 4]], SMALLINT_ARRAY],  # int2[] multidimensional
+        [[1, None, 3], INTEGER_ARRAY],  # int4[] with None
+        [[7000000000, 2, 3], BIGINT_ARRAY],  # int8[]
+        [[1.1, 2.2, 3.3], FLOAT_ARRAY],  # float8[]
+        [[Decimal("1.1"), None, Decimal("3.3")], NUMERIC_ARRAY],  # numeric[]
+        [[f"{CURRENCY}1.10", None, f"{CURRENCY}3.30"], MONEY_ARRAY],  # money[]
+        [[UUID("911460f2-1f43-fea2-3e2c-e01fd5b5069d")], UUID_ARRAY],  # uuid[]
+        [  # json[]
+            [{"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}],
+            JSON_ARRAY,
+        ],
+        [  # jsonb[]
+            [{"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}],
+            JSONB_ARRAY,
+        ],
+        [Time(4, 5, 6), TIME],  # time
+        [Date(2001, 2, 3), DATE],  # date
+        [Datetime(2001, 2, 3, 4, 5, 6), TIMESTAMP],  # timestamp
+        [Datetime(2001, 2, 3, 4, 5, 6, 0, Timezone.utc), TIMESTAMPTZ],  # timestamptz
+        [True, BOOLEAN],  # bool
+        [None, BOOLEAN],  # null
+        [Decimal("1.1"), NUMERIC],  # numeric
+        [f"{CURRENCY}1.10", MONEY],  # money
+        [f"-{CURRENCY}1.10", MONEY],  # money
+        [50000000000000, BIGINT],  # int8
+        [UUID("911460f2-1f43-fea2-3e2c-e01fd5b5069d"), UUID_TYPE],  # uuid
+        [IPv4Network("192.168.0.0/28"), INET],  # inet
+        [IPv4Address("192.168.0.1"), INET],  # inet
+        [86722, XID],  # xid
+        ["infinity", TIMESTAMP],  # timestamp
+        ["(2.3,1)", POINT],  # point
+        [{"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}, JSON],  # json
+        [{"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}, JSONB],  # jsonb
+    ],
+)
+def test_roundtrip_oid(con, test_input, oid):
+
+    retval = con.run("SELECT :v", v=test_input, types={"v": oid})
+    assert retval[0][0] == test_input
+
+    assert oid == con.columns[0]["type_oid"]
+
+
+@pytest.mark.parametrize(
+    "test_input,typ,required_version",
+    [
+        [[True, False, None], "bool[]", None],
+        [[IPv4Address("192.168.0.1")], "inet[]", None],
+        [[Date(2021, 3, 1)], "date[]", None],
+        [[Datetime(2001, 2, 3, 4, 5, 6)], "timestamp[]", None],
+        [[Datetime(2001, 2, 3, 4, 5, 6, 0, Timezone.utc)], "timestamptz[]", None],
+        [[Time(4, 5, 6)], "time[]", None],
+        [[Timedelta(seconds=30)], "interval[]", None],
+        [[{"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}], "jsonb[]", None],
+        [[b"\x00\x01\x02\x03\x02\x01\x00"], "bytea[]", None],
+        [[Decimal("1.1"), None, Decimal("3.3")], "numeric[]", None],
+        [[UUID("911460f2-1f43-fea2-3e2c-e01fd5b5069d")], "uuid[]", None],
+        [
+            [
+                "Hello!",
+                "World!",
+                "abcdefghijklmnopqrstuvwxyz",
+                "",
+                "A bunch of random characters:",
+                " ~!@#$%^&*()_+`1234567890-=[]\\{}|{;':\",./<>?\t",
+                None,
+            ],
+            "varchar[]",
+            None,
+        ],
+        [Timedelta(seconds=30), "interval", None],
+        [Time(4, 5, 6), "time", None],
+        [Date(2001, 2, 3), "date", None],
+        [Datetime(2001, 2, 3, 4, 5, 6), "timestamp", None],
+        [Datetime(2001, 2, 3, 4, 5, 6, 0, Timezone.utc), "timestamptz", None],
+        [True, "bool", 10],
+        [Decimal("1.1"), "numeric", None],
+        [1.756e-12, "float8", None],
+        [float("inf"), "float8", None],
+        ["hello world", "unknown", 10],
+        ["hello \u0173 world", "varchar", 10],
+        [50000000000000, "int8", None],
+        [b"\x00\x01\x02\x03\x02\x01\x00", "bytea", None],
+        [bytearray(b"\x00\x01\x02\x03\x02\x01\x00"), "bytea", None],
+        [UUID("911460f2-1f43-fea2-3e2c-e01fd5b5069d"), "uuid", None],
+        [IPv4Network("192.168.0.0/28"), "inet", None],
+        [IPv4Address("192.168.0.1"), "inet", None],
+    ],
+)
+def test_roundtrip_cast(con, pg_version, test_input, typ, required_version):
+    if required_version is not None and pg_version < required_version:
+        return
+
+    retval = con.run(f"SELECT CAST(:v AS {typ})", v=test_input)
+    assert retval[0][0] == test_input
+
+
+@pytest.mark.parametrize(
+    "test_input,expected",
+    [
+        ("SELECT CAST('{a,b,c}' AS TEXT[])", ["a", "b", "c"]),
+        ("SELECT CAST('{a,b,c}' AS CHAR[])", ["a", "b", "c"]),
+        ("SELECT CAST('{a,b,c}' AS VARCHAR[])", ["a", "b", "c"]),
+        ("SELECT CAST('{a,b,c}' AS CSTRING[])", ["a", "b", "c"]),
+        ("SELECT CAST('{a,b,c}' AS NAME[])", ["a", "b", "c"]),
+        ("SELECT CAST('{}' AS text[])", []),
+        ('SELECT CAST(\'{NULL,"NULL",NULL,""}\' AS text[])', [None, "NULL", None, ""]),
+    ],
+)
+def test_array_in(con, test_input, expected):
+    result = con.run(test_input)
+    assert result[0][0] == expected
+
+
+def test_numeric_array_out(con):
+    res = con.run("SELECT '{1.1,2.2,3.3}'::numeric[] AS f1")
+    assert res[0][0] == [Decimal("1.1"), Decimal("2.2"), Decimal("3.3")]
+
+
+def test_empty_array(con):
+    v = []
+    retval = con.run("SELECT cast(:v as varchar[])", v=v)
+    assert retval[0][0] == v
+
+
+def test_macaddr(con):
+    retval = con.run("SELECT macaddr '08002b:010203'")
+    assert retval[0][0] == "08:00:2b:01:02:03"
+
+
+def test_tsvector_roundtrip(con):
+    retval = con.run(
+        "SELECT cast(:v as tsvector)", v="a fat cat sat on a mat and ate a fat rat"
+    )
+    assert retval[0][0] == "'a' 'and' 'ate' 'cat' 'fat' 'mat' 'on' 'rat' 'sat'"
+
+
+def test_hstore_roundtrip(con):
+    val = '"a"=>"1"'
+    retval = con.run("SELECT cast(:val as hstore)", val=val)
+    assert retval[0][0] == val
+
+
+def test_json_access_object(con):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    retval = con.run("SELECT cast(:val as json) -> :name", val=dumps(val), name="name")
+    assert retval[0][0] == "Apollo 11 Cave"
+
+
+def test_jsonb_access_object(con):
+    val = {"name": "Apollo 11 Cave", "zebra": True, "age": 26.003}
+    retval = con.run("SELECT cast(:val as jsonb) -> :name", val=dumps(val), name="name")
+    assert retval[0][0] == "Apollo 11 Cave"
+
+
+def test_json_access_array(con):
+    val = [-1, -2, -3, -4, -5]
+    retval = con.run(
+        "SELECT cast(:v1 as json) -> cast(:v2 as int)", v1=dumps(val), v2=2
+    )
+    assert retval[0][0] == -3
+
+
+def test_jsonb_access_array(con):
+    val = [-1, -2, -3, -4, -5]
+    retval = con.run(
+        "SELECT cast(:v1 as jsonb) -> cast(:v2 as int)", v1=dumps(val), v2=2
+    )
+    assert retval[0][0] == -3
+
+
+def test_jsonb_access_path(con):
+    j = {"a": [1, 2, 3], "b": [4, 5, 6]}
+
+    path = ["a", "2"]
+
+    retval = con.run("SELECT cast(:v1 as jsonb) #>> :v2", v1=dumps(j), v2=path)
+    assert retval[0][0] == str(j[path[0]][int(path[1])])
+
+
+def test_time_in():
+    actual = time_in("12:57:18.000396")
+    assert actual == Time(12, 57, 18, 396)
diff -pruN 1.10.6-3/versioneer.py 1.23.0-0ubuntu1/versioneer.py
--- 1.10.6-3/versioneer.py	2016-05-23 20:22:36.000000000 +0000
+++ 1.23.0-0ubuntu1/versioneer.py	2021-07-30 12:10:36.000000000 +0000
@@ -1,21 +1,17 @@
+# Version: 0.19
 
-# Version: 0.15
+"""The Versioneer - like a rocketeer, but for versions.
 
-"""
 The Versioneer
 ==============
 
 * like a rocketeer, but for versions!
-* https://github.com/warner/python-versioneer
+* https://github.com/python-versioneer/python-versioneer
 * Brian Warner
 * License: Public Domain
-* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
-* [![Latest Version]
-(https://pypip.in/version/versioneer/badge.svg?style=flat)
-](https://pypi.python.org/pypi/versioneer/)
-* [![Build Status]
-(https://travis-ci.org/warner/python-versioneer.png?branch=master)
-](https://travis-ci.org/warner/python-versioneer)
+* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
+* [![Latest Version][pypi-image]][pypi-url]
+* [![Build Status][travis-image]][travis-url]
 
 This is a tool for managing a recorded version number in distutils-based
 python projects. The goal is to remove the tedious and error-prone "update
@@ -26,9 +22,10 @@ system, and maybe making new tarballs.
 
 ## Quick Install
 
-* `pip install versioneer` to somewhere to your $PATH
-* add a `[versioneer]` section to your setup.cfg (see below)
+* `pip install versioneer` to somewhere in your $PATH
+* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
 * run `versioneer install` in your source tree, commit the results
+* Verify version information with `python setup.py version`
 
 ## Version Identifiers
 
@@ -60,7 +57,7 @@ version 1.3). Many VCS systems can repor
 for example `git describe --tags --dirty --always` reports things like
 "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
-uncommitted changes.
+uncommitted changes).
 
 The version identifier is used for multiple purposes:
 
@@ -87,125 +84,7 @@ the generated version data.
 
 ## Installation
 
-First, decide on values for the following configuration variables:
-
-* `VCS`: the version control system you use. Currently accepts "git".
-
-* `style`: the style of version string to be produced. See "Styles" below for
-  details. Defaults to "pep440", which looks like
-  `TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
-
-* `versionfile_source`:
-
-  A project-relative pathname into which the generated version strings should
-  be written. This is usually a `_version.py` next to your project's main
-  `__init__.py` file, so it can be imported at runtime. If your project uses
-  `src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
-  This file should be checked in to your VCS as usual: the copy created below
-  by `setup.py setup_versioneer` will include code that parses expanded VCS
-  keywords in generated tarballs. The 'build' and 'sdist' commands will
-  replace it with a copy that has just the calculated version string.
-
-  This must be set even if your project does not have any modules (and will
-  therefore never import `_version.py`), since "setup.py sdist" -based trees
-  still need somewhere to record the pre-calculated version strings. Anywhere
-  in the source tree should do. If there is a `__init__.py` next to your
-  `_version.py`, the `setup.py setup_versioneer` command (described below)
-  will append some `__version__`-setting assignments, if they aren't already
-  present.
-
-* `versionfile_build`:
-
-  Like `versionfile_source`, but relative to the build directory instead of
-  the source directory. These will differ when your setup.py uses
-  'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
-  then you will probably have `versionfile_build='myproject/_version.py'` and
-  `versionfile_source='src/myproject/_version.py'`.
-
-  If this is set to None, then `setup.py build` will not attempt to rewrite
-  any `_version.py` in the built tree. If your project does not have any
-  libraries (e.g. if it only builds a script), then you should use
-  `versionfile_build = None` and override `distutils.command.build_scripts`
-  to explicitly insert a copy of `versioneer.get_version()` into your
-  generated script.
-
-* `tag_prefix`:
-
-  a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
-  If your tags look like 'myproject-1.2.0', then you should use
-  tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
-  should be an empty string.
-
-* `parentdir_prefix`:
-
-  a optional string, frequently the same as tag_prefix, which appears at the
-  start of all unpacked tarball filenames. If your tarball unpacks into
-  'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
-  just omit the field from your `setup.cfg`.
-
-This tool provides one script, named `versioneer`. That script has one mode,
-"install", which writes a copy of `versioneer.py` into the current directory
-and runs `versioneer.py setup` to finish the installation.
-
-To versioneer-enable your project:
-
-* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
-  populating it with the configuration values you decided earlier (note that
-  the option names are not case-sensitive):
-
-  ````
-  [versioneer]
-  VCS = git
-  style = pep440
-  versionfile_source = src/myproject/_version.py
-  versionfile_build = myproject/_version.py
-  tag_prefix = ""
-  parentdir_prefix = myproject-
-  ````
-
-* 2: Run `versioneer install`. This will do the following:
-
-  * copy `versioneer.py` into the top of your source tree
-  * create `_version.py` in the right place (`versionfile_source`)
-  * modify your `__init__.py` (if one exists next to `_version.py`) to define
-    `__version__` (by calling a function from `_version.py`)
-  * modify your `MANIFEST.in` to include both `versioneer.py` and the
-    generated `_version.py` in sdist tarballs
-
-  `versioneer install` will complain about any problems it finds with your
-  `setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
-  the problems.
-
-* 3: add a `import versioneer` to your setup.py, and add the following
-  arguments to the setup() call:
-
-        version=versioneer.get_version(),
-        cmdclass=versioneer.get_cmdclass(),
-
-* 4: commit these changes to your VCS. To make sure you won't forget,
-  `versioneer install` will mark everything it touched for addition using
-  `git add`. Don't forget to add `setup.py` and `setup.cfg` too.
-
-## Post-Installation Usage
-
-Once established, all uses of your tree from a VCS checkout should get the
-current version string. All generated tarballs should include an embedded
-version string (so users who unpack them will not need a VCS tool installed).
-
-If you distribute your project through PyPI, then the release process should
-boil down to two steps:
-
-* 1: git tag 1.0
-* 2: python setup.py register sdist upload
-
-If you distribute it through github (i.e. users use github to generate
-tarballs with `git archive`), the process is:
-
-* 1: git tag 1.0
-* 2: git push; git push --tags
-
-Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
-least one tag in its history.
+See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
 
 ## Version-String Flavors
 
@@ -226,6 +105,10 @@ information:
 * `['full-revisionid']`: detailed revision identifier. For Git, this is the
   full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
 
+* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
+  commit date in ISO 8601 format. This will be None if the date is not
+  available.
+
 * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
   this is only accurate if run in a VCS checkout, otherwise it is likely to
   be False or None
@@ -264,8 +147,8 @@ that this commit is two revisions ("+2")
 software (exactly equal to a known tag), the identifier will only contain the
 stripped tag, e.g. "0.11".
 
-Other styles are available. See details.md in the Versioneer source tree for
-descriptions.
+Other styles are available. See [details.md](details.md) in the Versioneer
+source tree for descriptions.
 
 ## Debugging
 
@@ -275,48 +158,84 @@ version`, which will run the version-loo
 display the full contents of `get_versions()` (including the `error` string,
 which may help identify what went wrong).
 
+## Known Limitations
+
+Some situations are known to cause problems for Versioneer. This details the
+most significant ones. More can be found on Github
+[issues page](https://github.com/python-versioneer/python-versioneer/issues).
+
+### Subprojects
+
+Versioneer has limited support for source trees in which `setup.py` is not in
+the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are
+two common reasons why `setup.py` might not be in the root:
+
+* Source trees which contain multiple subprojects, such as
+  [Buildbot](https://github.com/buildbot/buildbot), which contains both
+  "master" and "slave" subprojects, each with their own `setup.py`,
+  `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
+  distributions (and upload multiple independently-installable tarballs).
+* Source trees whose main purpose is to contain a C library, but which also
+  provide bindings to Python (and perhaps other languages) in subdirectories.
+
+Versioneer will look for `.git` in parent directories, and most operations
+should get the right version string. However `pip` and `setuptools` have bugs
+and implementation details which frequently cause `pip install .` from a
+subproject directory to fail to find a correct version string (so it usually
+defaults to `0+unknown`).
+
+`pip install --editable .` should work correctly. `setup.py install` might
+work too.
+
+Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
+some later version.
+
+[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
+this issue. The discussion in
+[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
+issue from the Versioneer side in more detail.
+[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
+[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
+pip to let Versioneer work correctly.
+
+Versioneer-0.16 and earlier only looked for a `.git` directory next to the
+`setup.cfg`, so subprojects were completely unsupported with those releases.
+
+### Editable installs with setuptools <= 18.5
+
+`setup.py develop` and `pip install --editable .` allow you to install a
+project into a virtualenv once, then continue editing the source code (and
+test) without re-installing after every change.
+
+"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
+convenient way to specify executable scripts that should be installed along
+with the python package.
+
+These both work as expected when using modern setuptools. When using
+setuptools-18.5 or earlier, however, certain operations will cause
+`pkg_resources.DistributionNotFound` errors when running the entrypoint
+script, which must be resolved by re-installing the package. This happens
+when the install happens with one version, then the egg_info data is
+regenerated while a different version is checked out. Many setup.py commands
+cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
+a different virtualenv), so this can be surprising.
+
+[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
+this one, but upgrading to a newer version of setuptools should probably
+resolve it.
+
+
 ## Updating Versioneer
 
 To upgrade your project to a new release of Versioneer, do the following:
 
 * install the new Versioneer (`pip install -U versioneer` or equivalent)
 * edit `setup.cfg`, if necessary, to include any new configuration settings
-  indicated by the release notes
+  indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
 * re-run `versioneer install` in your source tree, to replace
   `SRC/_version.py`
 * commit any changed files
 
-### Upgrading to 0.15
-
-Starting with this version, Versioneer is configured with a `[versioneer]`
-section in your `setup.cfg` file. Earlier versions required the `setup.py` to
-set attributes on the `versioneer` module immediately after import. The new
-version will refuse to run (raising an exception during import) until you
-have provided the necessary `setup.cfg` section.
-
-In addition, the Versioneer package provides an executable named
-`versioneer`, and the installation process is driven by running `versioneer
-install`. In 0.14 and earlier, the executable was named
-`versioneer-installer` and was run without an argument.
-
-### Upgrading to 0.14
-
-0.14 changes the format of the version string. 0.13 and earlier used
-hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
-plus-separated "local version" section strings, with dot-separated
-components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
-format, but should be ok with the new one.
-
-### Upgrading from 0.11 to 0.12
-
-Nothing special.
-
-### Upgrading from 0.10 to 0.11
-
-You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
-`setup.py setup_versioneer`. This will enable the use of additional
-version-control systems (SVN, etc) in the future.
-
 ## Future Directions
 
 This tool is designed to make it easily extended to other version-control
@@ -330,20 +249,30 @@ installation by editing setup.py . Alter
 direction and include code from all supported VCS systems, reducing the
 number of intermediate scripts.
 
+## Similar projects
+
+* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
+  dependency
+* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
+  versioneer
 
 ## License
 
-To make Versioneer easier to embed, all its code is hereby released into the
-public domain. The `_version.py` that it creates is also in the public
-domain.
+To make Versioneer easier to embed, all its code is dedicated to the public
+domain. The `_version.py` that it creates is also in the public domain.
+Specifically, both are released under the Creative Commons "Public Domain
+Dedication" license (CC0-1.0), as described in
+https://creativecommons.org/publicdomain/zero/1.0/ .
+
+[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
+[pypi-url]: https://pypi.python.org/pypi/versioneer/
+[travis-image]:
+https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
+[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
 
 """
 
-from __future__ import print_function
-try:
-    import configparser
-except ImportError:
-    import ConfigParser as configparser
+import configparser
 import errno
 import json
 import os
@@ -353,12 +282,15 @@ import sys
 
 
 class VersioneerConfig:
-    pass
+    """Container for Versioneer configuration parameters."""
 
 
 def get_root():
-    # we require that all commands are run from the project root, i.e. the
-    # directory that contains setup.py, setup.cfg, and versioneer.py .
+    """Get the project root directory.
+
+    We require that all commands are run from the project root, i.e. the
+    directory that contains setup.py, setup.cfg, and versioneer.py .
+    """
     root = os.path.realpath(os.path.abspath(os.getcwd()))
     setup_py = os.path.join(root, "setup.py")
     versioneer_py = os.path.join(root, "versioneer.py")
@@ -368,11 +300,13 @@ def get_root():
         setup_py = os.path.join(root, "setup.py")
         versioneer_py = os.path.join(root, "versioneer.py")
     if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
-        err = ("Versioneer was unable to run the project root directory. "
-               "Versioneer requires setup.py to be executed from "
-               "its immediate directory (like 'python setup.py COMMAND'), "
-               "or in a way that lets it use sys.argv[0] to find the root "
-               "(like 'python path/to/setup.py COMMAND').")
+        err = (
+            "Versioneer was unable to run the project root directory. "
+            "Versioneer requires setup.py to be executed from "
+            "its immediate directory (like 'python setup.py COMMAND'), "
+            "or in a way that lets it use sys.argv[0] to find the root "
+            "(like 'python path/to/setup.py COMMAND')."
+        )
         raise VersioneerBadRootError(err)
     try:
         # Certain runtime workflows (setup.py install/develop in a setuptools
@@ -382,42 +316,51 @@ def get_root():
         # os.path.dirname(__file__), as that will find whichever
         # versioneer.py was first imported, even in later projects.
         me = os.path.realpath(os.path.abspath(__file__))
-        if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
-            print("Warning: build in %s is using versioneer.py from %s"
-                  % (os.path.dirname(me), versioneer_py))
+        me_dir = os.path.normcase(os.path.splitext(me)[0])
+        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
+        if me_dir != vsr_dir:
+            print(
+                "Warning: build in %s is using versioneer.py from %s"
+                % (os.path.dirname(me), versioneer_py)
+            )
     except NameError:
         pass
     return root
 
 
 def get_config_from_root(root):
+    """Read the project setup.cfg file to determine Versioneer config."""
     # This might raise EnvironmentError (if setup.cfg is missing), or
     # configparser.NoSectionError (if it lacks a [versioneer] section), or
     # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
     # the top of versioneer.py for instructions on writing your setup.cfg .
     setup_cfg = os.path.join(root, "setup.cfg")
-    parser = configparser.SafeConfigParser()
+    parser = configparser.ConfigParser()
     with open(setup_cfg, "r") as f:
-        parser.readfp(f)
+        parser.read_file(f)
     VCS = parser.get("versioneer", "VCS")  # mandatory
 
     def get(parser, name):
         if parser.has_option("versioneer", name):
             return parser.get("versioneer", name)
         return None
+
     cfg = VersioneerConfig()
     cfg.VCS = VCS
     cfg.style = get(parser, "style") or ""
     cfg.versionfile_source = get(parser, "versionfile_source")
     cfg.versionfile_build = get(parser, "versionfile_build")
     cfg.tag_prefix = get(parser, "tag_prefix")
+    if cfg.tag_prefix in ("''", '""'):
+        cfg.tag_prefix = ""
     cfg.parentdir_prefix = get(parser, "parentdir_prefix")
     cfg.verbose = get(parser, "verbose")
     return cfg
 
 
 class NotThisMethod(Exception):
-    pass
+    """Exception raised if a method is not valid for the current scenario."""
+
 
 # these dictionaries contain VCS-specific tools
 LONG_VERSION_PY = {}
@@ -425,24 +368,33 @@ HANDLERS = {}
 
 
 def register_vcs_handler(vcs, method):  # decorator
+    """Create decorator to mark a method as the handler of a VCS."""
+
     def decorate(f):
+        """Store f in HANDLERS[vcs][method]."""
         if vcs not in HANDLERS:
             HANDLERS[vcs] = {}
         HANDLERS[vcs][method] = f
         return f
+
     return decorate
 
 
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
+    """Call the given command(s)."""
     assert isinstance(commands, list)
     p = None
     for c in commands:
         try:
             dispcmd = str([c] + args)
             # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
+            p = subprocess.Popen(
+                [c] + args,
+                cwd=cwd,
+                env=env,
+                stdout=subprocess.PIPE,
+                stderr=(subprocess.PIPE if hide_stderr else None),
+            )
             break
         except EnvironmentError:
             e = sys.exc_info()[1]
@@ -451,20 +403,23 @@ def run_command(commands, args, cwd=None
             if verbose:
                 print("unable to run %s" % dispcmd)
                 print(e)
-            return None
+            return None, None
     else:
         if verbose:
             print("unable to find command, tried %s" % (commands,))
-        return None
-    stdout = p.communicate()[0].strip()
-    if sys.version_info[0] >= 3:
-        stdout = stdout.decode()
+        return None, None
+    stdout = p.communicate()[0].strip().decode()
     if p.returncode != 0:
         if verbose:
             print("unable to run %s (error)" % dispcmd)
-        return None
-    return stdout
-LONG_VERSION_PY['git'] = '''
+            print("stdout was %s" % stdout)
+        return None, p.returncode
+    return stdout, p.returncode
+
+
+LONG_VERSION_PY[
+    "git"
+] = r'''
 # This file helps to compute a version number in source trees obtained from
 # git-archive tarball (such as those provided by githubs download-from-tag
 # feature). Distribution tarballs (built by setup.py sdist) and build
@@ -472,7 +427,9 @@ LONG_VERSION_PY['git'] = '''
 # that just contains the computed version number.
 
 # This file is released into the public domain. Generated by
-# versioneer-0.15 (https://github.com/warner/python-versioneer)
+# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
+
+"""Git implementation of _version.py."""
 
 import errno
 import os
@@ -482,21 +439,24 @@ import sys
 
 
 def get_keywords():
+    """Get the keywords needed to look up the version information."""
     # these strings will be replaced by git during git-archive.
     # setup.py/versioneer.py will grep for the variable names, so they must
     # each be defined on a line of their own. _version.py will just call
     # get_keywords().
     git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
     git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
-    keywords = {"refnames": git_refnames, "full": git_full}
+    git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
+    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
     return keywords
 
 
 class VersioneerConfig:
-    pass
+    """Container for Versioneer configuration parameters."""
 
 
 def get_config():
+    """Create, populate and return the VersioneerConfig() object."""
     # these strings are filled in when 'setup.py versioneer' creates
     # _version.py
     cfg = VersioneerConfig()
@@ -510,7 +470,7 @@ def get_config():
 
 
 class NotThisMethod(Exception):
-    pass
+    """Exception raised if a method is not valid for the current scenario."""
 
 
 LONG_VERSION_PY = {}
@@ -518,7 +478,9 @@ HANDLERS = {}
 
 
 def register_vcs_handler(vcs, method):  # decorator
+    """Create decorator to mark a method as the handler of a VCS."""
     def decorate(f):
+        """Store f in HANDLERS[vcs][method]."""
         if vcs not in HANDLERS:
             HANDLERS[vcs] = {}
         HANDLERS[vcs][method] = f
@@ -526,14 +488,17 @@ def register_vcs_handler(vcs, method):
     return decorate
 
 
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
+                env=None):
+    """Call the given command(s)."""
     assert isinstance(commands, list)
     p = None
     for c in commands:
         try:
             dispcmd = str([c] + args)
             # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
+                                 stdout=subprocess.PIPE,
                                  stderr=(subprocess.PIPE if hide_stderr
                                          else None))
             break
@@ -544,37 +509,48 @@ def run_command(commands, args, cwd=None
             if verbose:
                 print("unable to run %%s" %% dispcmd)
                 print(e)
-            return None
+            return None, None
     else:
         if verbose:
             print("unable to find command, tried %%s" %% (commands,))
-        return None
-    stdout = p.communicate()[0].strip()
-    if sys.version_info[0] >= 3:
-        stdout = stdout.decode()
+        return None, None
+    stdout = p.communicate()[0].strip().decode()
     if p.returncode != 0:
         if verbose:
             print("unable to run %%s (error)" %% dispcmd)
-        return None
-    return stdout
+            print("stdout was %%s" %% stdout)
+        return None, p.returncode
+    return stdout, p.returncode
 
 
 def versions_from_parentdir(parentdir_prefix, root, verbose):
-    # Source tarballs conventionally unpack into a directory that includes
-    # both the project name and a version string.
-    dirname = os.path.basename(root)
-    if not dirname.startswith(parentdir_prefix):
-        if verbose:
-            print("guessing rootdir is '%%s', but '%%s' doesn't start with "
-                  "prefix '%%s'" %% (root, dirname, parentdir_prefix))
-        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-    return {"version": dirname[len(parentdir_prefix):],
-            "full-revisionid": None,
-            "dirty": False, "error": None}
+    """Try to determine the version from the parent directory name.
+
+    Source tarballs conventionally unpack into a directory that includes both
+    the project name and a version string. We will also support searching up
+    two directory levels for an appropriately named parent directory
+    """
+    rootdirs = []
+
+    for i in range(3):
+        dirname = os.path.basename(root)
+        if dirname.startswith(parentdir_prefix):
+            return {"version": dirname[len(parentdir_prefix):],
+                    "full-revisionid": None,
+                    "dirty": False, "error": None, "date": None}
+        else:
+            rootdirs.append(root)
+            root = os.path.dirname(root)  # up a level
+
+    if verbose:
+        print("Tried directories %%s but none started with prefix %%s" %%
+              (str(rootdirs), parentdir_prefix))
+    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
 
 
 @register_vcs_handler("git", "get_keywords")
 def git_get_keywords(versionfile_abs):
+    """Extract version information from the given file."""
     # the code embedded in _version.py can just fetch the value of these
     # keywords. When used from setup.py, we don't want to import _version.py,
     # so we do it with a regexp instead. This function is not used from
@@ -591,6 +567,10 @@ def git_get_keywords(versionfile_abs):
                 mo = re.search(r'=\s*"(.*)"', line)
                 if mo:
                     keywords["full"] = mo.group(1)
+            if line.strip().startswith("git_date ="):
+                mo = re.search(r'=\s*"(.*)"', line)
+                if mo:
+                    keywords["date"] = mo.group(1)
         f.close()
     except EnvironmentError:
         pass
@@ -599,8 +579,22 @@ def git_get_keywords(versionfile_abs):
 
 @register_vcs_handler("git", "keywords")
 def git_versions_from_keywords(keywords, tag_prefix, verbose):
+    """Get version information from git keywords."""
     if not keywords:
         raise NotThisMethod("no keywords at all, weird")
+    date = keywords.get("date")
+    if date is not None:
+        # Use only the last line.  Previous lines may contain GPG signature
+        # information.
+        date = date.splitlines()[-1]
+
+        # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
+        # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
+        # -like" string, which we must then edit to make compliant), because
+        # it's been around since git-1.5.3, and it's too difficult to
+        # discover which version we're using, or to work around using an
+        # older one.
+        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
     refnames = keywords["refnames"].strip()
     if refnames.startswith("$Format"):
         if verbose:
@@ -621,7 +615,7 @@ def git_versions_from_keywords(keywords,
         # "stabilization", as well as "HEAD" and "master".
         tags = set([r for r in refs if re.search(r'\d', r)])
         if verbose:
-            print("discarding '%%s', no digits" %% ",".join(refs-tags))
+            print("discarding '%%s', no digits" %% ",".join(refs - tags))
     if verbose:
         print("likely tags: %%s" %% ",".join(sorted(tags)))
     for ref in sorted(tags):
@@ -632,41 +626,46 @@ def git_versions_from_keywords(keywords,
                 print("picking %%s" %% r)
             return {"version": r,
                     "full-revisionid": keywords["full"].strip(),
-                    "dirty": False, "error": None
-                    }
+                    "dirty": False, "error": None,
+                    "date": date}
     # no suitable tags, so version is "0+unknown", but full hex is still there
     if verbose:
         print("no suitable tags, using unknown + full revision id")
     return {"version": "0+unknown",
             "full-revisionid": keywords["full"].strip(),
-            "dirty": False, "error": "no suitable tags"}
+            "dirty": False, "error": "no suitable tags", "date": None}
 
 
 @register_vcs_handler("git", "pieces_from_vcs")
 def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
-    # this runs 'git' from the root of the source tree. This only gets called
-    # if the git-archive 'subst' keywords were *not* expanded, and
-    # _version.py hasn't already been rewritten with a short version string,
-    # meaning we're inside a checked out source tree.
-
-    if not os.path.exists(os.path.join(root, ".git")):
-        if verbose:
-            print("no .git in %%s" %% root)
-        raise NotThisMethod("no .git directory")
+    """Get version from 'git describe' in the root of the source tree.
 
+    This only gets called if the git-archive 'subst' keywords were *not*
+    expanded, and _version.py hasn't already been rewritten with a short
+    version string, meaning we're inside a checked out source tree.
+    """
     GITS = ["git"]
     if sys.platform == "win32":
         GITS = ["git.cmd", "git.exe"]
-    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
-    # if there are no tags, this yields HEX[-dirty] (no NUM)
-    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
-                                      "--always", "--long"],
-                               cwd=root)
+
+    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
+                          hide_stderr=True)
+    if rc != 0:
+        if verbose:
+            print("Directory %%s not under git control" %% root)
+        raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+    # if there isn't one, this yields HEX[-dirty] (no NUM)
+    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
+                                          "--always", "--long",
+                                          "--match", "%%s*" %% tag_prefix],
+                                   cwd=root)
     # --long was added in git-1.5.5
     if describe_out is None:
         raise NotThisMethod("'git describe' failed")
     describe_out = describe_out.strip()
-    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
     if full_out is None:
         raise NotThisMethod("'git rev-parse' failed")
     full_out = full_out.strip()
@@ -717,27 +716,37 @@ def git_pieces_from_vcs(tag_prefix, root
     else:
         # HEX: no tags
         pieces["closest-tag"] = None
-        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
-                                cwd=root)
+        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
+                                    cwd=root)
         pieces["distance"] = int(count_out)  # total number of commits
 
+    # commit date: see ISO-8601 comment in git_versions_from_keywords()
+    date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
+                       cwd=root)[0].strip()
+    # Use only the last line.  Previous lines may contain GPG signature
+    # information.
+    date = date.splitlines()[-1]
+    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
     return pieces
 
 
 def plus_or_dot(pieces):
+    """Return a + if we don't already have one, else return a ."""
     if "+" in pieces.get("closest-tag", ""):
         return "."
     return "+"
 
 
 def render_pep440(pieces):
-    # now build up version string, with post-release "local version
-    # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
-    # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+    """Build up version string, with post-release "local version identifier".
 
-    # exceptions:
-    # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
 
+    Exceptions:
+    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"] or pieces["dirty"]:
@@ -755,30 +764,31 @@ def render_pep440(pieces):
 
 
 def render_pep440_pre(pieces):
-    # TAG[.post.devDISTANCE] . No -dirty
-
-    # exceptions:
-    # 1: no tags. 0.post.devDISTANCE
+    """TAG[.post0.devDISTANCE] -- No -dirty.
 
+    Exceptions:
+    1: no tags. 0.post0.devDISTANCE
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"]:
-            rendered += ".post.dev%%d" %% pieces["distance"]
+            rendered += ".post0.dev%%d" %% pieces["distance"]
     else:
         # exception #1
-        rendered = "0.post.dev%%d" %% pieces["distance"]
+        rendered = "0.post0.dev%%d" %% pieces["distance"]
     return rendered
 
 
 def render_pep440_post(pieces):
-    # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
-    # .dev0 sorts backwards (a dirty tree will appear "older" than the
-    # corresponding clean one), but you shouldn't be releasing software with
-    # -dirty anyways.
-
-    # exceptions:
-    # 1: no tags. 0.postDISTANCE[.dev0]
+    """TAG[.postDISTANCE[.dev0]+gHEX] .
 
+    The ".dev0" means dirty. Note that .dev0 sorts backwards
+    (a dirty tree will appear "older" than the corresponding clean one),
+    but you shouldn't be releasing software with -dirty anyways.
+
+    Exceptions:
+    1: no tags. 0.postDISTANCE[.dev0]
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"] or pieces["dirty"]:
@@ -797,11 +807,13 @@ def render_pep440_post(pieces):
 
 
 def render_pep440_old(pieces):
-    # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+    """TAG[.postDISTANCE[.dev0]] .
 
-    # exceptions:
-    # 1: no tags. 0.postDISTANCE[.dev0]
+    The ".dev0" means dirty.
 
+    Exceptions:
+    1: no tags. 0.postDISTANCE[.dev0]
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"] or pieces["dirty"]:
@@ -817,12 +829,13 @@ def render_pep440_old(pieces):
 
 
 def render_git_describe(pieces):
-    # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
-    # --always'
+    """TAG[-DISTANCE-gHEX][-dirty].
 
-    # exceptions:
-    # 1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    Like 'git describe --tags --dirty --always'.
 
+    Exceptions:
+    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"]:
@@ -836,12 +849,14 @@ def render_git_describe(pieces):
 
 
 def render_git_describe_long(pieces):
-    # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
-    # --always -long'. The distance/hash is unconditional.
+    """TAG-DISTANCE-gHEX[-dirty].
 
-    # exceptions:
-    # 1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    Like 'git describe --tags --dirty --always -long'.
+    The distance/hash is unconditional.
 
+    Exceptions:
+    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
@@ -854,11 +869,13 @@ def render_git_describe_long(pieces):
 
 
 def render(pieces, style):
+    """Render the given version pieces into the requested style."""
     if pieces["error"]:
         return {"version": "unknown",
                 "full-revisionid": pieces.get("long"),
                 "dirty": None,
-                "error": pieces["error"]}
+                "error": pieces["error"],
+                "date": None}
 
     if not style or style == "default":
         style = "pep440"  # the default
@@ -879,10 +896,12 @@ def render(pieces, style):
         raise ValueError("unknown style '%%s'" %% style)
 
     return {"version": rendered, "full-revisionid": pieces["long"],
-            "dirty": pieces["dirty"], "error": None}
+            "dirty": pieces["dirty"], "error": None,
+            "date": pieces.get("date")}
 
 
 def get_versions():
+    """Get version information or return default if unable to do so."""
     # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
     # __file__, we can work backwards from there to the root. Some
     # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
@@ -907,7 +926,8 @@ def get_versions():
     except NameError:
         return {"version": "0+unknown", "full-revisionid": None,
                 "dirty": None,
-                "error": "unable to find root of source tree"}
+                "error": "unable to find root of source tree",
+                "date": None}
 
     try:
         pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
@@ -923,12 +943,13 @@ def get_versions():
 
     return {"version": "0+unknown", "full-revisionid": None,
             "dirty": None,
-            "error": "unable to compute version"}
+            "error": "unable to compute version", "date": None}
 '''
 
 
 @register_vcs_handler("git", "get_keywords")
 def git_get_keywords(versionfile_abs):
+    """Extract version information from the given file."""
     # the code embedded in _version.py can just fetch the value of these
     # keywords. When used from setup.py, we don't want to import _version.py,
     # so we do it with a regexp instead. This function is not used from
@@ -945,6 +966,10 @@ def git_get_keywords(versionfile_abs):
                 mo = re.search(r'=\s*"(.*)"', line)
                 if mo:
                     keywords["full"] = mo.group(1)
+            if line.strip().startswith("git_date ="):
+                mo = re.search(r'=\s*"(.*)"', line)
+                if mo:
+                    keywords["date"] = mo.group(1)
         f.close()
     except EnvironmentError:
         pass
@@ -953,8 +978,22 @@ def git_get_keywords(versionfile_abs):
 
 @register_vcs_handler("git", "keywords")
 def git_versions_from_keywords(keywords, tag_prefix, verbose):
+    """Get version information from git keywords."""
     if not keywords:
         raise NotThisMethod("no keywords at all, weird")
+    date = keywords.get("date")
+    if date is not None:
+        # Use only the last line.  Previous lines may contain GPG signature
+        # information.
+        date = date.splitlines()[-1]
+
+        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
+        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
+        # -like" string, which we must then edit to make compliant), because
+        # it's been around since git-1.5.3, and it's too difficult to
+        # discover which version we're using, or to work around using an
+        # older one.
+        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
     refnames = keywords["refnames"].strip()
     if refnames.startswith("$Format"):
         if verbose:
@@ -964,7 +1003,7 @@ def git_versions_from_keywords(keywords,
     # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
     # just "foo-1.0". If we see a "tag: " prefix, prefer those.
     TAG = "tag: "
-    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
     if not tags:
         # Either we're using git < 1.8.3, or there really are no tags. We use
         # a heuristic: assume all version tags have a digit. The old git %d
@@ -973,54 +1012,74 @@ def git_versions_from_keywords(keywords,
         # between branches and tags. By ignoring refnames without digits, we
         # filter out many common branch names like "release" and
         # "stabilization", as well as "HEAD" and "master".
-        tags = set([r for r in refs if re.search(r'\d', r)])
+        tags = set([r for r in refs if re.search(r"\d", r)])
         if verbose:
-            print("discarding '%s', no digits" % ",".join(refs-tags))
+            print("discarding '%s', no digits" % ",".join(refs - tags))
     if verbose:
         print("likely tags: %s" % ",".join(sorted(tags)))
     for ref in sorted(tags):
         # sorting will prefer e.g. "2.0" over "2.0rc1"
         if ref.startswith(tag_prefix):
-            r = ref[len(tag_prefix):]
+            r = ref[len(tag_prefix) :]
             if verbose:
                 print("picking %s" % r)
-            return {"version": r,
-                    "full-revisionid": keywords["full"].strip(),
-                    "dirty": False, "error": None
-                    }
+            return {
+                "version": r,
+                "full-revisionid": keywords["full"].strip(),
+                "dirty": False,
+                "error": None,
+                "date": date,
+            }
     # no suitable tags, so version is "0+unknown", but full hex is still there
     if verbose:
         print("no suitable tags, using unknown + full revision id")
-    return {"version": "0+unknown",
-            "full-revisionid": keywords["full"].strip(),
-            "dirty": False, "error": "no suitable tags"}
+    return {
+        "version": "0+unknown",
+        "full-revisionid": keywords["full"].strip(),
+        "dirty": False,
+        "error": "no suitable tags",
+        "date": None,
+    }
 
 
 @register_vcs_handler("git", "pieces_from_vcs")
 def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
-    # this runs 'git' from the root of the source tree. This only gets called
-    # if the git-archive 'subst' keywords were *not* expanded, and
-    # _version.py hasn't already been rewritten with a short version string,
-    # meaning we're inside a checked out source tree.
-
-    if not os.path.exists(os.path.join(root, ".git")):
-        if verbose:
-            print("no .git in %s" % root)
-        raise NotThisMethod("no .git directory")
+    """Get version from 'git describe' in the root of the source tree.
 
+    This only gets called if the git-archive 'subst' keywords were *not*
+    expanded, and _version.py hasn't already been rewritten with a short
+    version string, meaning we're inside a checked out source tree.
+    """
     GITS = ["git"]
     if sys.platform == "win32":
         GITS = ["git.cmd", "git.exe"]
-    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
-    # if there are no tags, this yields HEX[-dirty] (no NUM)
-    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
-                                      "--always", "--long"],
-                               cwd=root)
+
+    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
+    if rc != 0:
+        if verbose:
+            print("Directory %s not under git control" % root)
+        raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+    # if there isn't one, this yields HEX[-dirty] (no NUM)
+    describe_out, rc = run_command(
+        GITS,
+        [
+            "describe",
+            "--tags",
+            "--dirty",
+            "--always",
+            "--long",
+            "--match",
+            "%s*" % tag_prefix,
+        ],
+        cwd=root,
+    )
     # --long was added in git-1.5.5
     if describe_out is None:
         raise NotThisMethod("'git describe' failed")
     describe_out = describe_out.strip()
-    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
     if full_out is None:
         raise NotThisMethod("'git rev-parse' failed")
     full_out = full_out.strip()
@@ -1038,17 +1097,16 @@ def git_pieces_from_vcs(tag_prefix, root
     dirty = git_describe.endswith("-dirty")
     pieces["dirty"] = dirty
     if dirty:
-        git_describe = git_describe[:git_describe.rindex("-dirty")]
+        git_describe = git_describe[: git_describe.rindex("-dirty")]
 
     # now we have TAG-NUM-gHEX or HEX
 
     if "-" in git_describe:
         # TAG-NUM-gHEX
-        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
         if not mo:
             # unparseable. Maybe git-describe is misbehaving?
-            pieces["error"] = ("unable to parse git-describe output: '%s'"
-                               % describe_out)
+            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
             return pieces
 
         # tag
@@ -1057,10 +1115,12 @@ def git_pieces_from_vcs(tag_prefix, root
             if verbose:
                 fmt = "tag '%s' doesn't start with prefix '%s'"
                 print(fmt % (full_tag, tag_prefix))
-            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
-                               % (full_tag, tag_prefix))
+            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
+                full_tag,
+                tag_prefix,
+            )
             return pieces
-        pieces["closest-tag"] = full_tag[len(tag_prefix):]
+        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
 
         # distance: number of commits since tag
         pieces["distance"] = int(mo.group(2))
@@ -1071,14 +1131,27 @@ def git_pieces_from_vcs(tag_prefix, root
     else:
         # HEX: no tags
         pieces["closest-tag"] = None
-        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
-                                cwd=root)
+        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
         pieces["distance"] = int(count_out)  # total number of commits
 
+    # commit date: see ISO-8601 comment in git_versions_from_keywords()
+    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
+        0
+    ].strip()
+    # Use only the last line.  Previous lines may contain GPG signature
+    # information.
+    date = date.splitlines()[-1]
+    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
     return pieces
 
 
 def do_vcs_install(manifest_in, versionfile_source, ipy):
+    """Git-specific installation logic for Versioneer.
+
+    For Git, this means creating/changing .gitattributes to mark _version.py
+    for export-subst keyword substitution.
+    """
     GITS = ["git"]
     if sys.platform == "win32":
         GITS = ["git.cmd", "git.exe"]
@@ -1112,26 +1185,43 @@ def do_vcs_install(manifest_in, versionf
 
 
 def versions_from_parentdir(parentdir_prefix, root, verbose):
-    # Source tarballs conventionally unpack into a directory that includes
-    # both the project name and a version string.
-    dirname = os.path.basename(root)
-    if not dirname.startswith(parentdir_prefix):
-        if verbose:
-            print("guessing rootdir is '%s', but '%s' doesn't start with "
-                  "prefix '%s'" % (root, dirname, parentdir_prefix))
-        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-    return {"version": dirname[len(parentdir_prefix):],
-            "full-revisionid": None,
-            "dirty": False, "error": None}
+    """Try to determine the version from the parent directory name.
+
+    Source tarballs conventionally unpack into a directory that includes both
+    the project name and a version string. We will also support searching up
+    two directory levels for an appropriately named parent directory
+    """
+    rootdirs = []
+
+    for i in range(3):
+        dirname = os.path.basename(root)
+        if dirname.startswith(parentdir_prefix):
+            return {
+                "version": dirname[len(parentdir_prefix) :],
+                "full-revisionid": None,
+                "dirty": False,
+                "error": None,
+                "date": None,
+            }
+        else:
+            rootdirs.append(root)
+            root = os.path.dirname(root)  # up a level
+
+    if verbose:
+        print(
+            "Tried directories %s but none started with prefix %s"
+            % (str(rootdirs), parentdir_prefix)
+        )
+    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+
 
 SHORT_VERSION_PY = """
-# This file was generated by 'versioneer.py' (0.15) from
+# This file was generated by 'versioneer.py' (0.19) from
 # revision-control system data, or from the parent directory name of an
 # unpacked source archive. Distribution tarballs contain a pre-generated copy
 # of this file.
 
 import json
-import sys
 
 version_json = '''
 %s
@@ -1144,22 +1234,28 @@ def get_versions():
 
 
 def versions_from_file(filename):
+    """Try to determine the version from _version.py if present."""
     try:
         with open(filename) as f:
             contents = f.read()
     except EnvironmentError:
         raise NotThisMethod("unable to read _version.py")
-    mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
-                   contents, re.M | re.S)
+    mo = re.search(
+        r"version_json = '''\n(.*)'''  # END VERSION_JSON", contents, re.M | re.S
+    )
+    if not mo:
+        mo = re.search(
+            r"version_json = '''\r\n(.*)'''  # END VERSION_JSON", contents, re.M | re.S
+        )
     if not mo:
         raise NotThisMethod("no version_json in _version.py")
     return json.loads(mo.group(1))
 
 
 def write_to_version_file(filename, versions):
+    """Write the given version number to the given _version.py file."""
     os.unlink(filename)
-    contents = json.dumps(versions, sort_keys=True,
-                          indent=1, separators=(",", ": "))
+    contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
     with open(filename, "w") as f:
         f.write(SHORT_VERSION_PY % contents)
 
@@ -1167,19 +1263,21 @@ def write_to_version_file(filename, vers
 
 
 def plus_or_dot(pieces):
+    """Return a + if we don't already have one, else return a ."""
     if "+" in pieces.get("closest-tag", ""):
         return "."
     return "+"
 
 
 def render_pep440(pieces):
-    # now build up version string, with post-release "local version
-    # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
-    # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+    """Build up version string, with post-release "local version identifier".
 
-    # exceptions:
-    # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
 
+    Exceptions:
+    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"] or pieces["dirty"]:
@@ -1189,38 +1287,38 @@ def render_pep440(pieces):
                 rendered += ".dirty"
     else:
         # exception #1
-        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
-                                          pieces["short"])
+        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
         if pieces["dirty"]:
             rendered += ".dirty"
     return rendered
 
 
 def render_pep440_pre(pieces):
-    # TAG[.post.devDISTANCE] . No -dirty
-
-    # exceptions:
-    # 1: no tags. 0.post.devDISTANCE
+    """TAG[.post0.devDISTANCE] -- No -dirty.
 
+    Exceptions:
+    1: no tags. 0.post0.devDISTANCE
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"]:
-            rendered += ".post.dev%d" % pieces["distance"]
+            rendered += ".post0.dev%d" % pieces["distance"]
     else:
         # exception #1
-        rendered = "0.post.dev%d" % pieces["distance"]
+        rendered = "0.post0.dev%d" % pieces["distance"]
     return rendered
 
 
 def render_pep440_post(pieces):
-    # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
-    # .dev0 sorts backwards (a dirty tree will appear "older" than the
-    # corresponding clean one), but you shouldn't be releasing software with
-    # -dirty anyways.
-
-    # exceptions:
-    # 1: no tags. 0.postDISTANCE[.dev0]
+    """TAG[.postDISTANCE[.dev0]+gHEX] .
 
+    The ".dev0" means dirty. Note that .dev0 sorts backwards
+    (a dirty tree will appear "older" than the corresponding clean one),
+    but you shouldn't be releasing software with -dirty anyways.
+
+    Exceptions:
+    1: no tags. 0.postDISTANCE[.dev0]
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"] or pieces["dirty"]:
@@ -1239,11 +1337,13 @@ def render_pep440_post(pieces):
 
 
 def render_pep440_old(pieces):
-    # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+    """TAG[.postDISTANCE[.dev0]] .
 
-    # exceptions:
-    # 1: no tags. 0.postDISTANCE[.dev0]
+    The ".dev0" means dirty.
 
+    Exceptions:
+    1: no tags. 0.postDISTANCE[.dev0]
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"] or pieces["dirty"]:
@@ -1259,12 +1359,13 @@ def render_pep440_old(pieces):
 
 
 def render_git_describe(pieces):
-    # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
-    # --always'
+    """TAG[-DISTANCE-gHEX][-dirty].
 
-    # exceptions:
-    # 1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    Like 'git describe --tags --dirty --always'.
 
+    Exceptions:
+    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         if pieces["distance"]:
@@ -1278,12 +1379,14 @@ def render_git_describe(pieces):
 
 
 def render_git_describe_long(pieces):
-    # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
-    # --always -long'. The distance/hash is unconditional.
+    """TAG-DISTANCE-gHEX[-dirty].
 
-    # exceptions:
-    # 1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    Like 'git describe --tags --dirty --always -long'.
+    The distance/hash is unconditional.
 
+    Exceptions:
+    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
+    """
     if pieces["closest-tag"]:
         rendered = pieces["closest-tag"]
         rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
@@ -1296,11 +1399,15 @@ def render_git_describe_long(pieces):
 
 
 def render(pieces, style):
+    """Render the given version pieces into the requested style."""
     if pieces["error"]:
-        return {"version": "unknown",
-                "full-revisionid": pieces.get("long"),
-                "dirty": None,
-                "error": pieces["error"]}
+        return {
+            "version": "unknown",
+            "full-revisionid": pieces.get("long"),
+            "dirty": None,
+            "error": pieces["error"],
+            "date": None,
+        }
 
     if not style or style == "default":
         style = "pep440"  # the default
@@ -1320,17 +1427,24 @@ def render(pieces, style):
     else:
         raise ValueError("unknown style '%s'" % style)
 
-    return {"version": rendered, "full-revisionid": pieces["long"],
-            "dirty": pieces["dirty"], "error": None}
+    return {
+        "version": rendered,
+        "full-revisionid": pieces["long"],
+        "dirty": pieces["dirty"],
+        "error": None,
+        "date": pieces.get("date"),
+    }
 
 
 class VersioneerBadRootError(Exception):
-    pass
+    """The project root directory is unknown or missing key files."""
 
 
 def get_versions(verbose=False):
-    # returns dict with two keys: 'version' and 'full'
+    """Get the project version from whatever source is available.
 
+    Returns dict with two keys: 'version' and 'full'.
+    """
     if "versioneer" in sys.modules:
         # see the discussion in cmdclass.py:get_cmdclass()
         del sys.modules["versioneer"]
@@ -1342,8 +1456,9 @@ def get_versions(verbose=False):
     handlers = HANDLERS.get(cfg.VCS)
     assert handlers, "unrecognized VCS '%s'" % cfg.VCS
     verbose = verbose or cfg.verbose
-    assert cfg.versionfile_source is not None, \
-        "please set versioneer.versionfile_source"
+    assert (
+        cfg.versionfile_source is not None
+    ), "please set versioneer.versionfile_source"
     assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
 
     versionfile_abs = os.path.join(root, cfg.versionfile_source)
@@ -1397,15 +1512,26 @@ def get_versions(verbose=False):
     if verbose:
         print("unable to compute version")
 
-    return {"version": "0+unknown", "full-revisionid": None,
-            "dirty": None, "error": "unable to compute version"}
+    return {
+        "version": "0+unknown",
+        "full-revisionid": None,
+        "dirty": None,
+        "error": "unable to compute version",
+        "date": None,
+    }
 
 
 def get_version():
+    """Get the short version string for this project."""
     return get_versions()["version"]
 
 
-def get_cmdclass():
+def get_cmdclass(cmdclass=None):
+    """Get the custom setuptools/distutils subclasses used by Versioneer.
+
+    If the package uses a different cmdclass (e.g. one from numpy), it
+    should be provide as an argument.
+    """
     if "versioneer" in sys.modules:
         del sys.modules["versioneer"]
         # this fixes the "python setup.py develop" case (also 'install' and
@@ -1419,9 +1545,9 @@ def get_cmdclass():
         # parent is protected against the child's "import versioneer". By
         # removing ourselves from sys.modules here, before the child build
         # happens, we protect the child from the parent's versioneer too.
-        # Also see https://github.com/warner/python-versioneer/issues/52
+        # Also see https://github.com/python-versioneer/python-versioneer/issues/52
 
-    cmds = {}
+    cmds = {} if cmdclass is None else cmdclass.copy()
 
     # we add "version" to both distutils and setuptools
     from distutils.core import Command
@@ -1442,8 +1568,10 @@ def get_cmdclass():
             print("Version: %s" % vers["version"])
             print(" full-revisionid: %s" % vers.get("full-revisionid"))
             print(" dirty: %s" % vers.get("dirty"))
+            print(" date: %s" % vers.get("date"))
             if vers["error"]:
                 print(" error: %s" % vers["error"])
+
     cmds["version"] = cmd_version
 
     # we override "build_py" in both distutils and setuptools
@@ -1455,8 +1583,19 @@ def get_cmdclass():
     #  setuptools/bdist_egg -> distutils/install_lib -> build_py
     #  setuptools/install -> bdist_egg ->..
     #  setuptools/develop -> ?
-
-    from distutils.command.build_py import build_py as _build_py
+    #  pip install:
+    #   copies source tree to a tempdir before running egg_info/etc
+    #   if .git isn't copied too, 'git describe' will fail
+    #   then does setup.py bdist_wheel, or sometimes setup.py install
+    #  setup.py egg_info -> ?
+
+    # we override different "build_py" commands for both environments
+    if "build_py" in cmds:
+        _build_py = cmds["build_py"]
+    elif "setuptools" in sys.modules:
+        from setuptools.command.build_py import build_py as _build_py
+    else:
+        from distutils.command.build_py import build_py as _build_py
 
     class cmd_build_py(_build_py):
         def run(self):
@@ -1467,15 +1606,47 @@ def get_cmdclass():
             # now locate _version.py in the new build/ directory and replace
             # it with an updated value
             if cfg.versionfile_build:
-                target_versionfile = os.path.join(self.build_lib,
-                                                  cfg.versionfile_build)
+                target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
                 print("UPDATING %s" % target_versionfile)
                 write_to_version_file(target_versionfile, versions)
+
     cmds["build_py"] = cmd_build_py
 
+    if "setuptools" in sys.modules:
+        from setuptools.command.build_ext import build_ext as _build_ext
+    else:
+        from distutils.command.build_ext import build_ext as _build_ext
+
+    class cmd_build_ext(_build_ext):
+        def run(self):
+            root = get_root()
+            cfg = get_config_from_root(root)
+            versions = get_versions()
+            _build_ext.run(self)
+            if self.inplace:
+                # build_ext --inplace will only build extensions in
+                # build/lib<..> dir with no _version.py to write to.
+                # As in place builds will already have a _version.py
+                # in the module dir, we do not need to write one.
+                return
+            # now locate _version.py in the new build/ directory and replace
+            # it with an updated value
+            target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
+            print("UPDATING %s" % target_versionfile)
+            write_to_version_file(target_versionfile, versions)
+
+    cmds["build_ext"] = cmd_build_ext
+
     if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
         from cx_Freeze.dist import build_exe as _build_exe
 
+        # nczeczulin reports that py2exe won't like the pep440-style string
+        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
+        # setup(console=[{
+        #   "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
+        #   "product_version": versioneer.get_version(),
+        #   ...
+
         class cmd_build_exe(_build_exe):
             def run(self):
                 root = get_root()
@@ -1489,18 +1660,53 @@ def get_cmdclass():
                 os.unlink(target_versionfile)
                 with open(cfg.versionfile_source, "w") as f:
                     LONG = LONG_VERSION_PY[cfg.VCS]
-                    f.write(LONG %
-                            {"DOLLAR": "$",
-                             "STYLE": cfg.style,
-                             "TAG_PREFIX": cfg.tag_prefix,
-                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
-                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
-                             })
+                    f.write(
+                        LONG
+                        % {
+                            "DOLLAR": "$",
+                            "STYLE": cfg.style,
+                            "TAG_PREFIX": cfg.tag_prefix,
+                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
+                        }
+                    )
+
         cmds["build_exe"] = cmd_build_exe
         del cmds["build_py"]
 
+    if "py2exe" in sys.modules:  # py2exe enabled?
+        from py2exe.distutils_buildexe import py2exe as _py2exe
+
+        class cmd_py2exe(_py2exe):
+            def run(self):
+                root = get_root()
+                cfg = get_config_from_root(root)
+                versions = get_versions()
+                target_versionfile = cfg.versionfile_source
+                print("UPDATING %s" % target_versionfile)
+                write_to_version_file(target_versionfile, versions)
+
+                _py2exe.run(self)
+                os.unlink(target_versionfile)
+                with open(cfg.versionfile_source, "w") as f:
+                    LONG = LONG_VERSION_PY[cfg.VCS]
+                    f.write(
+                        LONG
+                        % {
+                            "DOLLAR": "$",
+                            "STYLE": cfg.style,
+                            "TAG_PREFIX": cfg.tag_prefix,
+                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
+                        }
+                    )
+
+        cmds["py2exe"] = cmd_py2exe
+
     # we override different "sdist" commands for both environments
-    if "setuptools" in sys.modules:
+    if "sdist" in cmds:
+        _sdist = cmds["sdist"]
+    elif "setuptools" in sys.modules:
         from setuptools.command.sdist import sdist as _sdist
     else:
         from distutils.command.sdist import sdist as _sdist
@@ -1523,8 +1729,10 @@ def get_cmdclass():
             # updated value
             target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
             print("UPDATING %s" % target_versionfile)
-            write_to_version_file(target_versionfile,
-                                  self._versioneer_generated_versions)
+            write_to_version_file(
+                target_versionfile, self._versioneer_generated_versions
+            )
+
     cmds["sdist"] = cmd_sdist
 
     return cmds
@@ -1539,7 +1747,7 @@ a section like:
  style = pep440
  versionfile_source = src/myproject/_version.py
  versionfile_build = myproject/_version.py
- tag_prefix = ""
+ tag_prefix =
  parentdir_prefix = myproject-
 
 You will also need to edit your setup.py to use the results:
@@ -1575,14 +1783,17 @@ del get_versions
 
 
 def do_setup():
+    """Do main VCS-independent setup function for installing Versioneer."""
     root = get_root()
     try:
         cfg = get_config_from_root(root)
-    except (EnvironmentError, configparser.NoSectionError,
-            configparser.NoOptionError) as e:
+    except (
+        EnvironmentError,
+        configparser.NoSectionError,
+        configparser.NoOptionError,
+    ) as e:
         if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
-            print("Adding sample versioneer config to setup.cfg",
-                  file=sys.stderr)
+            print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
             with open(os.path.join(root, "setup.cfg"), "a") as f:
                 f.write(SAMPLE_CONFIG)
         print(CONFIG_ERROR, file=sys.stderr)
@@ -1591,15 +1802,18 @@ def do_setup():
     print(" creating %s" % cfg.versionfile_source)
     with open(cfg.versionfile_source, "w") as f:
         LONG = LONG_VERSION_PY[cfg.VCS]
-        f.write(LONG % {"DOLLAR": "$",
-                        "STYLE": cfg.style,
-                        "TAG_PREFIX": cfg.tag_prefix,
-                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
-                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
-                        })
+        f.write(
+            LONG
+            % {
+                "DOLLAR": "$",
+                "STYLE": cfg.style,
+                "TAG_PREFIX": cfg.tag_prefix,
+                "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+                "VERSIONFILE_SOURCE": cfg.versionfile_source,
+            }
+        )
 
-    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
-                       "__init__.py")
+    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
     if os.path.exists(ipy):
         try:
             with open(ipy, "r") as f:
@@ -1641,21 +1855,24 @@ def do_setup():
     else:
         print(" 'versioneer.py' already in MANIFEST.in")
     if cfg.versionfile_source not in simple_includes:
-        print(" appending versionfile_source ('%s') to MANIFEST.in" %
-              cfg.versionfile_source)
+        print(
+            " appending versionfile_source ('%s') to MANIFEST.in"
+            % cfg.versionfile_source
+        )
         with open(manifest_in, "a") as f:
             f.write("include %s\n" % cfg.versionfile_source)
     else:
         print(" versionfile_source already in MANIFEST.in")
 
     # Make VCS-specific changes. For git, this means creating/changing
-    # .gitattributes to mark _version.py for export-time keyword
+    # .gitattributes to mark _version.py for export-subst keyword
     # substitution.
     do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
     return 0
 
 
 def scan_setup_py():
+    """Validate the contents of setup.py against Versioneer's expectations."""
     found = set()
     setters = False
     errors = 0
@@ -1690,6 +1907,7 @@ def scan_setup_py():
         errors += 1
     return errors
 
+
 if __name__ == "__main__":
     cmd = sys.argv[1]
     if cmd == "setup":
